diff --git a/compiler-rs/clients_schema_to_openapi/src/schemas.rs b/compiler-rs/clients_schema_to_openapi/src/schemas.rs index 02f6674217..052e2194f3 100644 --- a/compiler-rs/clients_schema_to_openapi/src/schemas.rs +++ b/compiler-rs/clients_schema_to_openapi/src/schemas.rs @@ -472,9 +472,9 @@ impl<'a> TypesAndComponents<'a> { data.external_docs = self.convert_external_docs(prop); data.deprecated = prop.deprecation.is_some(); data.description = self.property_description(prop)?; + data.default = prop.server_default.clone().map(|value| { serde_json::json!(value) }); data.extensions = crate::availability_as_extensions(&prop.availability, &self.config.flavor); // TODO: prop.aliases as extensions - // TODO: prop.server_default as extension // TODO: prop.doc_id as extension (new representation of since and stability) // TODO: prop.es_quirk as extension? // TODO: prop.codegen_name as extension? diff --git a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm index 945ce7e430..9e880de336 100644 Binary files a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm and b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm differ diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 6f8f5a2148..ab22e1eac4 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -3049,6 +3049,7 @@ }, "max_outstanding_read_requests": { "description": "The maximum number of outstanding read requests from the remote cluster.", + "default": 12.0, "type": "number" }, "settings": { @@ -3060,6 +3061,7 @@ }, "max_outstanding_write_requests": { "description": "The maximum number of outstanding write requests from the remote cluster.", + "default": 9.0, "type": "number" }, "read_poll_timeout": { @@ -3067,6 +3069,7 @@ }, "max_read_request_operation_count": { "description": "The maximum number of operations to pull per read from the remote cluster.", + "default": 5120.0, "type": "number" }, "max_read_request_size": { @@ -3077,6 +3080,7 @@ }, "max_write_buffer_count": { "description": "The maximum number of operations that can be queued for writing. 
When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.", + "default": 2147483647.0, "type": "number" }, "max_write_buffer_size": { @@ -3084,6 +3088,7 @@ }, "max_write_request_operation_count": { "description": "The maximum number of operations per bulk write request executed on the follower.", + "default": 5120.0, "type": "number" }, "max_write_request_size": { @@ -9821,6 +9826,7 @@ }, "include_ccs_metadata": { "description": "When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`\nobject with information about the clusters that participated in the search along with info such as shards\ncount.", + "default": false, "type": "boolean" }, "wait_for_completion_timeout": { @@ -9831,6 +9837,7 @@ }, "keep_on_completion": { "description": "Indicates whether the query and its results are stored in the cluster.\nIf false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.", + "default": false, "type": "boolean" } }, @@ -10119,6 +10126,7 @@ }, "include_ccs_metadata": { "description": "When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`\nobject with information about the clusters that participated in the search along with info such as shards\ncount.", + "default": false, "type": "boolean" } }, @@ -14148,6 +14156,7 @@ }, "enabled": { "description": "If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle\nthat's disabled (enabled: `false`) will have no effect on the data stream.", + "default": true, "type": "boolean" } } @@ -20556,6 +20565,7 @@ }, "deprecated": { "description": "Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.", + "default": false, "type": "boolean" } } @@ -21875,10 +21885,12 @@ "properties": { "allow_no_match": { "description": "Refer to the description for the `allow_no_match` query parameter.", + "default": true, "type": "boolean" }, "force": { "description": "Refer to the description for the `force` query parameter.", + "default": false, "type": "boolean" }, "timeout": { @@ -22339,6 +22351,7 @@ "properties": { "allow_lazy_start": { "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node. If\nset to `false` and a machine learning node with capacity to run the job\ncannot be immediately found, the API returns an error. If set to `true`,\nthe API does not return an error; the job waits in the `starting` state\nuntil sufficient machine learning node capacity is available. This\nbehavior is also affected by the cluster-wide\n`xpack.ml.max_lazy_ml_nodes` setting.", + "default": false, "type": "boolean" }, "analysis": { @@ -22356,6 +22369,7 @@ }, "max_num_threads": { "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. 
Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "default": 1.0, "type": "number" }, "_meta": { @@ -22363,6 +22377,7 @@ }, "model_memory_limit": { "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\nto create data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "default": "1gb", "type": "string" }, "source": { @@ -22660,6 +22675,7 @@ }, "scroll_size": { "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\nThe maximum value is the value of `index.max_result_window`, which is 10,000 by default.", + "default": 1000.0, "type": "number" }, "headers": { @@ -23085,6 +23101,7 @@ }, "max_model_memory": { "description": "Refer to the description for the `max_model_memory` query parameter.", + "default": "20mb", "type": "string" } } @@ -23278,6 +23295,7 @@ "properties": { "allow_lazy_open": { "description": "Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.", + "default": false, "type": "boolean" }, "analysis_config": { @@ -23294,6 +23312,7 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`.", + "default": 1.0, "type": "number" }, "data_description": { @@ -23321,6 +23340,7 @@ }, "model_snapshot_retention_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted.", + "default": 10.0, "type": "number" }, "renormalization_window_days": { @@ -26653,6 +26673,7 @@ "properties": { "delete_intervening_results": { "description": "Refer to the description for the `delete_intervening_results` query parameter.", + "default": false, "type": "boolean" } } @@ -27174,10 +27195,12 @@ "properties": { "allow_no_match": { "description": "Refer to the description for the `allow_no_match` query parameter.", + "default": true, "type": "boolean" }, "force": { "description": "Refer to the description for the `force` query parameter.", + "default": false, "type": "boolean" }, "timeout": { @@ -27316,14 +27339,17 @@ }, "model_memory_limit": { "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. 
If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\nto create data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "default": "1gb", "type": "string" }, "max_num_threads": { "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "default": 1.0, "type": "number" }, "allow_lazy_start": { "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node.", + "default": false, "type": "boolean" } } @@ -27521,6 +27547,7 @@ }, "scroll_size": { "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\nThe maximum value is the value of `index.max_result_window`.", + "default": 1000.0, "type": "number" } } @@ -27737,6 +27764,7 @@ "properties": { "allow_lazy_open": { "description": "Advanced configuration option. Specifies whether this job can open when\nthere is insufficient machine learning node capacity for it to be\nimmediately assigned to a node. If `false` and a machine learning node\nwith capacity to run the job cannot immediately be found, the open\nanomaly detection jobs API returns an error. However, this is also\nsubject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this\noption is set to `true`, the open anomaly detection jobs API does not\nreturn an error and the job waits in the opening state until sufficient\nmachine learning node capacity is available.", + "default": false, "type": "boolean" }, "analysis_limits": { @@ -27770,10 +27798,12 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old\nmodel snapshots for this job. It specifies a period of time (in days)\nafter which only the first snapshot per day is retained. This period is\nrelative to the timestamp of the most recent snapshot for this job. Valid\nvalues range from 0 to `model_snapshot_retention_days`. For jobs created\nbefore version 7.8.0, the default value matches\n`model_snapshot_retention_days`.", + "default": 1.0, "type": "number" }, "model_snapshot_retention_days": { "description": "Advanced configuration option, which affects the automatic removal of old\nmodel snapshots for this job. It specifies the maximum period of time (in\ndays) that snapshots are retained. This period is relative to the\ntimestamp of the most recent snapshot for this job.", + "default": 10.0, "type": "number" }, "renormalization_window_days": { @@ -27957,6 +27987,7 @@ }, "retain": { "description": "If `true`, this snapshot will not be deleted during automatic cleanup of\nsnapshots older than `model_snapshot_retention_days`. 
However, this\nsnapshot will be deleted when the job is deleted.", + "default": false, "type": "boolean" } } @@ -28038,6 +28069,7 @@ "properties": { "number_of_allocations": { "description": "The number of model allocations on each node where the model is deployed.\nAll allocations on a node share the same copy of the model in memory but use\na separate set of threads to evaluate the model.\nIncreasing this value generally increases the throughput.\nIf this setting is greater than the number of hardware threads\nit will automatically be changed to a value less than the number of hardware threads.\nIf adaptive_allocations is enabled, do not set this value, because it’s automatically set.", + "default": 1.0, "type": "number" }, "adaptive_allocations": { @@ -30545,6 +30577,66 @@ "summary": "Reindex multiple sources", "description": "Run `POST _reindex` to reindex from multiple sources. The `index` attribute in source can be a list, which enables you to copy from lots of sources in one request. This example copies documents from the `my-index-000001` and `my-index-000002` indices.\n", "value": "{\n \"source\": {\n \"index\": [\"my-index-000001\", \"my-index-000002\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000002\"\n }\n}" + }, + "ReindexRequestExample10": { + "summary": "Reindex with Painless", + "description": "You can use Painless to reindex daily indices to apply a new template to the existing documents. The script extracts the date from the index name and creates a new index with `-1` appended. For example, all data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.\n", + "value": "{\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n}" + }, + "ReindexRequestExample11": { + "summary": "Reindex a random subset", + "description": "Run `POST _reindex` to extract a random subset of the source for testing. You might need to adjust the `min_score` value depending on the relative amount of data extracted from source.\n", + "value": "{\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\" : {\n \"random_score\" : {},\n \"min_score\" : 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample12": { + "summary": "Reindex modified documents", + "description": "Run `POST _reindex` to modify documents during reindexing. 
This example bumps the version of the source document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n}" + }, + "ReindexRequestExample13": { + "summary": "Reindex from remote on Elastic Cloud", + "description": "When using Elastic Cloud, you can run `POST _reindex` and authenticate against a remote cluster with an API key.\n", + "value": "{\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample2": { + "summary": "Manual slicing", + "description": "Run `POST _reindex` to slice a reindex request manually. Provide a slice ID and total number of slices to each request.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample3": { + "summary": "Automatic slicing", + "description": "Run `POST _reindex?slices=5&refresh` to automatically parallelize using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample4": { + "summary": "Routing", + "description": "By default, if reindex sees a document with routing, the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this behavior. In this example, run `POST _reindex` to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`.\n", + "value": "{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}" + }, + "ReindexRequestExample5": { + "summary": "Ingest pipelines", + "description": "Run `POST _reindex` and use the ingest pipelines feature.", + "value": "{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}" + }, + "ReindexRequestExample6": { + "summary": "Reindex with a query", + "description": "Run `POST _reindex` and add a query to the `source` to limit the documents to reindex. For example, this request copies documents into `my-new-index-000001` only if they have a `user.id` of `kimchy`.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample7": { + "summary": "Reindex with max_docs", + "description": "You can limit the number of processed documents by setting `max_docs`. 
For example, run `POST _reindex` to copy a single document from `my-index-000001` to `my-new-index-000001`.\n", + "value": "{\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample8": { + "summary": "Reindex selected fields", + "description": "You can use source filtering to reindex a subset of the fields in the original documents. For example, run `POST _reindex` to reindex only the `user.id` and `_doc` fields of each document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\"user.id\", \"_doc\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample9": { + "summary": "Reindex new field names", + "description": "A reindex operation can build a copy of an index with renamed fields. If your index has documents with `text` and `flag` fields, you can change the latter field name to `tag` during the reindex.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}" } } } @@ -31062,7 +31154,11 @@ "rollup" ], "summary": "Search rolled-up data", - "description": "The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\n**Searching both historical rollup and non-rollup data**\n\nThe rollup search API has the capability to search across both \"live\" non-rollup data and the aggregated rollup data.\nThis is done by simply adding the live indices to the URI. 
For example:\n\n```\nGET sensor-1,sensor_rollup/_rollup_search\n{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}\n```\n\nThe rollup search endpoint does two things when the search runs:\n\n* The original request is sent to the non-rollup index unaltered.\n* A rewritten version of the original request is sent to the rollup index.\n\nWhen the two responses are received, the endpoint rewrites the rollup response and merges the two together.\nDuring the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.", + "description": "The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\nFor more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.", + "externalDocs": { + "url": "https://www.elastic.co/docs/manage-data/lifecycle/rollup/getting-started-api#historical-only-search-example", + "x-previousVersionUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/rollup-search.html" + }, "operationId": "rollup-rollup-search", "parameters": [ { @@ -31092,7 +31188,11 @@ "rollup" ], "summary": "Search rolled-up data", - "description": "The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\n**Searching both historical rollup and non-rollup data**\n\nThe rollup search API has the capability to search across both \"live\" non-rollup data and the aggregated rollup data.\nThis is done by simply adding the live indices to the URI. 
For example:\n\n```\nGET sensor-1,sensor_rollup/_rollup_search\n{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}\n```\n\nThe rollup search endpoint does two things when the search runs:\n\n* The original request is sent to the non-rollup index unaltered.\n* A rewritten version of the original request is sent to the rollup index.\n\nWhen the two responses are received, the endpoint rewrites the rollup response and merges the two together.\nDuring the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.", + "description": "The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\nFor more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.", + "externalDocs": { + "url": "https://www.elastic.co/docs/manage-data/lifecycle/rollup/getting-started-api#historical-only-search-example", + "x-previousVersionUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/rollup-search.html" + }, "operationId": "rollup-rollup-search-1", "parameters": [ { @@ -34405,6 +34505,7 @@ }, "owner": { "description": "Query API keys owned by the currently authenticated user.\nThe `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.\n\nNOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`.", + "default": false, "type": "boolean" }, "realm_name": { @@ -39956,14 +40057,17 @@ }, "ignore_unavailable": { "description": "If `true`, the request ignores any index or data stream in indices that's missing from the snapshot.\nIf `false`, the request returns an error for any missing index or data stream.", + "default": false, "type": "boolean" }, "include_aliases": { "description": "If `true`, the request restores aliases for any restored data streams and indices.\nIf `false`, the request doesn’t restore aliases.", + "default": true, "type": "boolean" }, "include_global_state": { "description": "If `true`, restore the cluster state. 
The cluster state includes:\n\n* Persistent cluster settings\n* Index templates\n* Legacy index templates\n* Ingest pipelines\n* Index lifecycle management (ILM) policies\n* Stored scripts\n* For snapshots taken after 7.12.0, feature states\n\nIf `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot.\nIt completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot.\n\nUse the `feature_states` parameter to configure how feature states are restored.\n\nIf `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail.", + "default": false, "type": "boolean" }, "index_settings": { @@ -39974,6 +40078,7 @@ }, "partial": { "description": "If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.\n\nIf true, it allows restoring a partial snapshot of indices with unavailable shards.\nOnly shards that were successfully included in the snapshot will be restored.\nAll missing shards will be recreated as empty.", + "default": false, "type": "boolean" }, "rename_pattern": { @@ -43280,7 +43385,11 @@ "document" ], "summary": "Update a document", - "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\n\n## Required authorization\n\n* Index privileges: `write`\n", + "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\nFor 
usage examples such as partial updates, upserts, and scripted updates, see the External documentation.\n\n## Required authorization\n\n* Index privileges: `write`\n", + "externalDocs": { + "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-document", + "x-previousVersionUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update.html" + }, "operationId": "update", "parameters": [ { @@ -43444,6 +43553,7 @@ "properties": { "detect_noop": { "description": "If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.", + "default": true, "type": "boolean" }, "doc": { @@ -43452,6 +43562,7 @@ }, "doc_as_upsert": { "description": "If `true`, use the contents of 'doc' as the value of 'upsert'.\nNOTE: Using ingest pipelines with `doc_as_upsert` is not supported.", + "default": false, "type": "boolean" }, "script": { @@ -43459,6 +43570,7 @@ }, "scripted_upsert": { "description": "If `true`, run the script whether or not the document exists.", + "default": false, "type": "boolean" }, "_source": { @@ -43560,7 +43672,11 @@ "document" ], "summary": "Update documents", - "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into 
account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.\n\n## Required authorization\n\n* Index privileges: `read`,`write`\n", + "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Refreshing shards**\n\nSpecifying the `refresh` parameter refreshes all shards once the request completes.\nThis is different to the update API's `refresh` parameter, which causes only the shard\nthat received the request to be refreshed. 
Unlike the update API, it does not support\n`wait_for`.\n\n**Running update by query asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch\nperforms some preflight checks, launches the request, and returns a\n[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.\n\n**Waiting for active shards**\n\n`wait_for_active_shards` controls how many copies of a shard must be active\nbefore proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)\nfor details. `timeout` controls how long each write request waits for unavailable\nshards to become available. Both work exactly the way they work in the\n[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also\nspecify the `scroll` parameter to control how long it keeps the search context\nalive, for example `?scroll=10m`. The default is 5 minutes.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. 
Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\nRefer to the linked documentation for examples of how to update documents using the `_update_by_query` API.\n\n## Required authorization\n\n* Index privileges: `read`,`write`\n", + "externalDocs": { + "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-by-query-api", + "x-previousVersionUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update-by-query.html" + }, "operationId": "update-by-query", "parameters": [ { @@ -51074,6 +51190,7 @@ "properties": { "boost": { "description": "Floating point number used to decrease or increase the relevance scores of the query.\nBoost values are relative to the default value of 1.0.\nA boost value between 0 and 1.0 decreases the relevance score.\nA value greater than 1.0 increases the relevance score.", + "default": 1.0, "type": "number" }, "_name": { @@ -51162,6 +51279,7 @@ }, "auto_generate_synonyms_phrase_query": { "description": "If true, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "operator": { @@ -51230,6 +51348,7 @@ }, "tie_breaker": { "description": "Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses.", + "default": 0.0, "type": "number" } }, @@ -51581,6 +51700,7 @@ }, "factor": { "description": "Optional factor to multiply the field value with.", + "default": 1.0, "type": "number" }, "missing": { @@ -51694,6 +51814,7 @@ }, "explain": { "description": "If `true`, the request returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -51705,6 +51826,7 @@ }, "from": { "description": "The starting document offset, which must be non-negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -51768,6 +51890,7 @@ }, "profile": { "description": "Set to `true` to return detailed timing information about the execution of individual components in a search request.\nNOTE: This is a debugging tool and adds significant overhead to search execution.", + "default": false, "type": "boolean" }, "query": { @@ -51802,6 +51925,7 @@ }, "size": { "description": "The number of hits to return, which 
must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` property.", + "default": 10.0, "type": "number" }, "slice": { @@ -51825,6 +51949,7 @@ }, "terminate_after": { "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this property to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this property for requests that target data streams with backing indices across multiple data tiers.\n\nIf set to `0` (default), the query does not terminate early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -51833,10 +51958,12 @@ }, "track_scores": { "description": "If `true`, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If `true`, the request returns the document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -51904,10 +52031,12 @@ }, "size": { "description": "The maximum number of hits to return per `inner_hits`.", + "default": 3.0, "type": "number" }, "from": { "description": "Inner hit starting document offset.", + "default": 0.0, "type": "number" }, "collapse": { @@ -51953,6 +52082,7 @@ "$ref": "#/components/schemas/_types.Fields" }, "track_scores": { + "default": false, "type": "boolean" }, "version": { @@ -52039,10 +52169,12 @@ }, "boundary_chars": { "description": "A string that contains each boundary character.", + "default": ".,!? \\t\\n", "type": "string" }, "boundary_max_scan": { "description": "How far to scan for boundary characters.", + "default": 20.0, "type": "number" }, "boundary_scanner": { @@ -52050,6 +52182,7 @@ }, "boundary_scanner_locale": { "description": "Controls which locale is used to search for sentence and word boundaries.\nThis parameter takes a form of a language tag, for example: `\"en-US\"`, `\"fr-FR\"`, `\"ja-JP\"`.", + "default": "Locale.ROOT", "type": "string" }, "force_source": { @@ -52061,6 +52194,7 @@ }, "fragment_size": { "description": "The size of the highlighted fragment in characters.", + "default": 100.0, "type": "number" }, "highlight_filter": { @@ -52078,10 +52212,12 @@ }, "no_match_size": { "description": "The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight.", + "default": 0.0, "type": "number" }, "number_of_fragments": { "description": "The maximum number of fragments to return.\nIf the number of fragments is set to `0`, no fragments are returned.\nInstead, the entire field contents are highlighted and returned.\nThis can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required.\nIf `number_of_fragments` is `0`, `fragment_size` is ignored.", + "default": 5.0, "type": "number" }, "options": { @@ -52095,6 +52231,7 @@ }, "phrase_limit": { "description": "Controls the number of matching phrases in a document that are considered.\nPrevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory.\nWhen using `matched_fields`, `phrase_limit` phrases per matched field are considered. 
Raising the limit increases query time and consumes more memory.\nOnly supported by the `fvh` highlighter.", + "default": 256.0, "type": "number" }, "post_tags": { @@ -52113,6 +52250,7 @@ }, "require_field_match": { "description": "By default, only fields that contain a query match are highlighted.\nSet to `false` to highlight all fields.", + "default": true, "type": "boolean" }, "tags_schema": { @@ -52522,10 +52660,12 @@ }, "query_weight": { "description": "Relative importance of the original query versus the rescore query.", + "default": 1.0, "type": "number" }, "rescore_query_weight": { "description": "Relative importance of the rescore query versus the original query.", + "default": 1.0, "type": "number" }, "score_mode": { @@ -53093,10 +53233,12 @@ "properties": { "max_expansions": { "description": "Maximum number of variations created.", + "default": 50.0, "type": "number" }, "prefix_length": { "description": "Number of beginning characters left unchanged when creating expansions.", + "default": 0.0, "type": "number" }, "rewrite": { @@ -53104,6 +53246,7 @@ }, "transpositions": { "description": "Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "fuzziness": { @@ -53159,6 +53302,7 @@ }, "ignore_unmapped": { "description": "Set to `true` to ignore an unmapped field and not match any documents for this query.\nSet to `false` to throw an exception if the field is not mapped.", + "default": false, "type": "boolean" } } @@ -53199,6 +53343,7 @@ }, "ignore_unmapped": { "description": "Set to `true` to ignore an unmapped field and not match any documents for this query.\nSet to `false` to throw an exception if the field is not mapped.", + "default": false, "type": "boolean" } }, @@ -53259,6 +53404,7 @@ "properties": { "ignore_unmapped": { "description": "Set to `true` to ignore an unmapped field and not match any documents for this query.\nSet to `false` to throw an exception if the field is not mapped.", + "default": false, "type": "boolean" } } @@ -53275,6 +53421,7 @@ "properties": { "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped `type` and not return any documents instead of an error.", + "default": false, "type": "boolean" }, "inner_hits": { @@ -53328,6 +53475,7 @@ "properties": { "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error.\nYou can use this parameter to query multiple indices that may not contain the `parent_type`.", + "default": false, "type": "boolean" }, "inner_hits": { @@ -53341,6 +53489,7 @@ }, "score": { "description": "Indicates whether the relevance score of a matching parent document is aggregated into its child documents.", + "default": false, "type": "boolean" } }, @@ -53432,10 +53581,12 @@ }, "max_gaps": { "description": "Maximum number of positions between the matching terms.\nIntervals produced by the rules further apart than this are not considered matches.", + "default": -1.0, "type": "number" }, "ordered": { "description": "If `true`, intervals produced by the rules should appear in the order in which they are specified.", + "default": false, "type": "boolean" }, "filter": { @@ -53541,6 +53692,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged when creating expansions.", + "default": 0.0, "type": "number" }, "term": { @@ -53549,6 +53701,7 @@ }, "transpositions": { "description": "Indicates whether edits include 
transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "use_field": { @@ -53568,10 +53721,12 @@ }, "max_gaps": { "description": "Maximum number of positions between the matching terms.\nTerms further apart than this are not considered matches.", + "default": -1.0, "type": "number" }, "ordered": { "description": "If `true`, matching terms must appear in their specified order.", + "default": false, "type": "boolean" }, "query": { @@ -53741,6 +53896,7 @@ }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "cutoff_frequency": { @@ -53755,14 +53911,17 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "lenient": { "description": "If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "max_expansions": { "description": "Maximum number of terms to which the query will expand.", + "default": 50.0, "type": "number" }, "minimum_should_match": { @@ -53773,6 +53932,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "query": { @@ -53836,10 +53996,12 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": true, "type": "boolean" }, "max_expansions": { "description": "Maximum number of terms to which the query will expand.\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": 50.0, "type": "number" }, "minimum_should_match": { @@ -53850,6 +54012,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": 0.0, "type": "number" }, "query": { @@ -53891,6 +54054,7 @@ }, "slop": { "description": "Maximum number of positions allowed between matching tokens.", + "default": 0.0, "type": "number" }, "zero_terms_query": { @@ -53917,6 +54081,7 @@ }, "max_expansions": { "description": "Maximum number of terms to which the last provided term of the query value will expand.", + "default": 50.0, "type": "number" }, "query": { @@ -53925,6 +54090,7 @@ }, "slop": { "description": "Maximum number of positions allowed between matching tokens.", + "default": 0.0, "type": "number" }, "zero_terms_query": { @@ -53954,10 +54120,12 @@ }, "boost_terms": { "description": "Each term in the formed query could be further boosted by its tf-idf score.\nThis sets the boost factor to use when using this feature.\nDefaults to deactivated (0).", + "default": 0.0, "type": "number" }, "fail_on_unsupported_field": { "description": "Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`).", + "default": true, "type": "boolean" }, "fields": { @@ -53969,6 +54137,7 @@ }, "include": { "description": "Specifies whether the input documents should also be included in the search results returned.", + "default": false, "type": "boolean" }, "like": 
{ @@ -53991,14 +54160,17 @@ }, "max_query_terms": { "description": "The maximum number of query terms that can be selected.", + "default": 25.0, "type": "number" }, "max_word_length": { "description": "The maximum word length above which the terms are ignored.\nDefaults to unbounded (`0`).", + "default": 0.0, "type": "number" }, "min_doc_freq": { "description": "The minimum document frequency below which the terms are ignored from the input document.", + "default": 5.0, "type": "number" }, "minimum_should_match": { @@ -54006,10 +54178,12 @@ }, "min_term_freq": { "description": "The minimum term frequency below which the terms are ignored from the input document.", + "default": 2.0, "type": "number" }, "min_word_length": { "description": "The minimum word length below which the terms are ignored.", + "default": 0.0, "type": "number" }, "routing": { @@ -54172,6 +54346,7 @@ }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "cutoff_frequency": { @@ -54189,14 +54364,17 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": true, "type": "boolean" }, "lenient": { "description": "If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "max_expansions": { "description": "Maximum number of terms to which the query will expand.", + "default": 50.0, "type": "number" }, "minimum_should_match": { @@ -54207,6 +54385,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "query": { @@ -54215,10 +54394,12 @@ }, "slop": { "description": "Maximum number of positions allowed between matching tokens.", + "default": 0.0, "type": "number" }, "tie_breaker": { "description": "Determines how scores for each per-term blended query and scores across groups are combined.", + "default": 0.0, "type": "number" }, "type": { @@ -54255,6 +54436,7 @@ "properties": { "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped path and not return any documents instead of an error.", + "default": false, "type": "boolean" }, "inner_hits": { @@ -54290,6 +54472,7 @@ }, "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped `type` and not return any documents instead of an error.", + "default": false, "type": "boolean" }, "type": { @@ -54425,6 +54608,7 @@ }, "case_insensitive": { "description": "Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`.\nDefault is `false` which means the case sensitivity of matching depends on the underlying field’s mapping.", + "default": false, "x-state": "Generally available; Added in 7.10.0", "type": "boolean" } @@ -54445,6 +54629,7 @@ "properties": { "allow_leading_wildcard": { "description": "If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string.", + "default": true, "type": "boolean" }, "analyzer": { @@ -54453,10 +54638,12 @@ }, "analyze_wildcard": { "description": "If `true`, the query attempts to analyze wildcard terms in the query string.", + "default": false, "type": "boolean" }, "auto_generate_synonyms_phrase_query": { "description": "If 
`true`, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "default_field": { @@ -54467,9 +54654,11 @@ }, "enable_position_increments": { "description": "If `true`, enable position increments in queries constructed from a `query_string` search.", + "default": true, "type": "boolean" }, "escape": { + "default": false, "type": "boolean" }, "fields": { @@ -54484,10 +54673,12 @@ }, "fuzzy_max_expansions": { "description": "Maximum number of terms to which the query expands for fuzzy matching.", + "default": 50.0, "type": "number" }, "fuzzy_prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "fuzzy_rewrite": { @@ -54495,14 +54686,17 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "lenient": { "description": "If `true`, format-based errors, such as providing a text value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "max_determinized_states": { "description": "Maximum number of automaton states required for the query.", + "default": 10000.0, "type": "number" }, "minimum_should_match": { @@ -54510,6 +54704,7 @@ }, "phrase_slop": { "description": "Maximum number of positions allowed between matching tokens for phrases.", + "default": 0.0, "type": "number" }, "query": { @@ -54865,6 +55060,7 @@ "properties": { "case_insensitive": { "description": "Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`.\nWhen `false`, case sensitivity of matching depends on the underlying field’s mapping.", + "default": false, "x-state": "Generally available; Added in 7.10.0", "type": "boolean" }, @@ -54874,6 +55070,7 @@ }, "max_determinized_states": { "description": "Maximum number of automaton states required for the query.", + "default": 10000.0, "type": "number" }, "rewrite": { @@ -55026,10 +55223,12 @@ }, "analyze_wildcard": { "description": "If `true`, the query attempts to analyze wildcard terms in the query string.", + "default": false, "type": "boolean" }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, the parser creates a match_phrase query for each multi-position token.", + "default": true, "type": "boolean" }, "default_operator": { @@ -55047,10 +55246,12 @@ }, "fuzzy_max_expansions": { "description": "Maximum number of terms to which the query expands for fuzzy matching.", + "default": 50.0, "type": "number" }, "fuzzy_prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "fuzzy_transpositions": { @@ -55059,6 +55260,7 @@ }, "lenient": { "description": "If `true`, format-based errors, such as providing a text value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "minimum_should_match": { @@ -55303,10 +55505,12 @@ }, "post": { "description": "The number of tokens after the include span that can’t have overlap with the exclude span.", + "default": 0.0, "type": "number" }, "pre": { "description": "The number of tokens before the include span that can’t have overlap with the exclude span.", + "default": 0.0, "type": "number" } }, @@ -55438,14 +55642,17 @@ "properties": { "tokens_freq_ratio_threshold": { "description": "Tokens whose frequency is more than this threshold times the average 
frequency of all tokens in the specified field are considered outliers and pruned.", + "default": 5.0, "type": "number" }, "tokens_weight_threshold": { "description": "Tokens whose weight is less than this threshold are considered nonsignificant and pruned.", + "default": 0.4, "type": "number" }, "only_score_pruned_tokens": { "description": "Whether to only score pruned tokens, versus only scoring kept tokens.", + "default": false, "type": "boolean" } } @@ -55463,6 +55670,7 @@ }, "case_insensitive": { "description": "Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`.\nWhen `false`, the case sensitivity of matching depends on the underlying field’s mapping.", + "default": false, "x-state": "Generally available; Added in 7.10.0", "type": "boolean" } @@ -55655,6 +55863,7 @@ "properties": { "buckets": { "description": "The target number of buckets.", + "default": 10.0, "type": "number" }, "field": { @@ -56009,6 +56218,7 @@ "properties": { "precision_threshold": { "description": "A unique count below which counts are expected to be close to accurate.\nThis allows you to trade memory for accuracy.", + "default": 3000.0, "type": "number" }, "rehash": { @@ -56045,14 +56255,17 @@ }, "max_unique_tokens": { "description": "The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1.\nSmaller values use less memory and create fewer categories. Larger values will use more memory and\ncreate narrower categories. Max allowed value is 100.", + "default": 50.0, "type": "number" }, "max_matched_tokens": { "description": "The maximum number of token positions to match on before attempting to merge categories. Larger\nvalues will use more memory and create narrower categories. Max allowed value is 100.", + "default": 5.0, "type": "number" }, "similarity_threshold": { "description": "The minimum percentage of tokens that must match for text to be added to the category bucket. Must\nbe between 1 and 100. The larger the value, the narrower the categories. 
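For context on how the pruning defaults above (`tokens_freq_ratio_threshold: 5`, `tokens_weight_threshold: 0.4`, `only_score_pruned_tokens: false`) are exercised, here is a minimal, illustrative `sparse_vector` search body. The index, field, and inference endpoint names are assumptions, not part of this diff; it is written with Rust's serde_json since that is the language of the generating compiler.

```rust
use serde_json::json;

fn main() {
    // Sketch only: "my-index", "ml.tokens", and "my-elser-model" are made up.
    // Omitting `pruning_config` falls back to the defaults recorded in the
    // schema above (5.0, 0.4, false).
    let body = json!({
        "query": {
            "sparse_vector": {
                "field": "ml.tokens",
                "inference_id": "my-elser-model",
                "query": "observability pipelines",
                "prune": true,
                "pruning_config": {
                    "tokens_freq_ratio_threshold": 5,
                    "tokens_weight_threshold": 0.4,
                    "only_score_pruned_tokens": false
                }
            }
        }
    });
    println!("POST /my-index/_search\n{}", serde_json::to_string_pretty(&body).unwrap());
}
```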
Larger values will increase memory\nusage and create narrower categories.", + "default": 50.0, "type": "number" }, "categorization_filters": { @@ -56071,6 +56284,7 @@ }, "size": { "description": "The number of buckets to return.", + "default": 10.0, "type": "number" }, "min_doc_count": { @@ -56146,6 +56360,7 @@ }, "size": { "description": "The number of composite buckets that should be returned.", + "default": 10.0, "type": "number" }, "sources": { @@ -56521,6 +56736,7 @@ }, "max_docs_per_value": { "description": "Limits how many documents are permitted per choice of de-duplicating value.", + "default": 1.0, "type": "number" }, "script": { @@ -56528,6 +56744,7 @@ }, "shard_size": { "description": "Limits how many top-scoring documents are collected in the sample processed on each shard.", + "default": 100.0, "type": "number" }, "field": { @@ -56589,14 +56806,17 @@ }, "minimum_set_size": { "description": "The minimum size of one item set.", + "default": 1.0, "type": "number" }, "minimum_support": { "description": "The minimum support of one item set.", + "default": 0.1, "type": "number" }, "size": { "description": "The number of top item sets to return.", + "default": 10.0, "type": "number" }, "filter": { @@ -56687,10 +56907,12 @@ }, "other_bucket_key": { "description": "The key with which the other bucket is returned.", + "default": "_other_", "type": "string" }, "keyed": { "description": "By default, the named filters aggregation returns the buckets as an object.\nSet to `false` to return the buckets as an array of objects.", + "default": true, "type": "boolean" } } @@ -56724,6 +56946,7 @@ "properties": { "wrap_longitude": { "description": "Specifies whether the bounding box should be allowed to overlap the international date line.", + "default": true, "type": "boolean" } } @@ -56835,6 +57058,7 @@ }, "size": { "description": "The maximum number of geohash buckets to return.", + "default": 10000.0, "type": "number" } } @@ -56870,6 +57094,7 @@ }, "size": { "description": "The maximum length of the line represented in the aggregation.\nValid sizes are between 1 and 10000.", + "default": 10000.0, "type": "number" } }, @@ -56920,6 +57145,7 @@ }, "size": { "description": "The maximum number of buckets to return.", + "default": 10000.0, "type": "number" }, "bounds": { @@ -56945,6 +57171,7 @@ }, "precision": { "description": "Integer zoom of the key used to define cells or buckets\nin the results. 
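The `filters` aggregation hunk above records `other_bucket_key` defaulting to `_other_` and `keyed` to `true`. A minimal sketch of a request that overrides both (index and filter names are illustrative):

```rust
use serde_json::json;

fn main() {
    // Setting `other_bucket_key` implicitly enables the "other" bucket;
    // without it, non-matching docs would fall under the default key "_other_".
    // `keyed: false` returns the buckets as an array instead of an object.
    let body = json!({
        "size": 0,
        "aggs": {
            "by_status": {
                "filters": {
                    "keyed": false,
                    "other_bucket_key": "everything_else",
                    "filters": {
                        "errors":   { "match": { "status": "error" } },
                        "warnings": { "match": { "status": "warning" } }
                    }
                }
            }
        }
    });
    println!("POST /logs/_search\n{}", serde_json::to_string_pretty(&body).unwrap());
}
```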
Value should be between 0-15.", + "default": 6.0, "type": "number" }, "bounds": { @@ -56952,6 +57179,7 @@ }, "size": { "description": "Maximum number of buckets to return.", + "default": 10000.0, "type": "number" }, "shard_size": { @@ -57019,6 +57247,7 @@ }, "keyed": { "description": "If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys.", + "default": false, "type": "boolean" } } @@ -57110,10 +57339,12 @@ }, "is_ipv6": { "description": "Defines whether the prefix applies to IPv6 addresses.", + "default": false, "type": "boolean" }, "append_prefix_length": { "description": "Defines whether the prefix length is appended to IP address keys in the response.", + "default": false, "type": "boolean" }, "keyed": { @@ -57122,6 +57353,7 @@ }, "min_doc_count": { "description": "Minimum number of documents in a bucket for it to be included in the response.", + "default": 1.0, "type": "number" } }, @@ -57174,6 +57406,7 @@ }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" } } @@ -57187,6 +57420,7 @@ }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" }, "prediction_field_type": { @@ -57270,6 +57504,7 @@ "properties": { "compression": { "description": "Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error.", + "default": 1000.0, "type": "number" } } @@ -57549,6 +57784,7 @@ }, "shift": { "description": "By default, the window consists of the last n values excluding the current bucket.\nIncreasing `shift` by 1 moves the starting window position by 1 to the right.", + "default": 0.0, "type": "number" }, "keyed": { @@ -57572,6 +57808,7 @@ }, "shift": { "description": "By default, the window consists of the last n values excluding the current bucket.\nIncreasing `shift` by 1 moves the starting window position by 1 to the right.", + "default": 0.0, "type": "number" }, "window": { @@ -57598,10 +57835,12 @@ }, "min_doc_count": { "description": "The minimum number of documents in a bucket for it to be returned.", + "default": 1.0, "type": "number" }, "shard_min_doc_count": { "description": "The minimum number of documents in a bucket on each shard for it to be returned.", + "default": 1.0, "type": "number" }, "shard_size": { @@ -57610,10 +57849,12 @@ }, "show_term_doc_count_error": { "description": "Calculates the doc count error on a per-term basis.", + "default": false, "type": "boolean" }, "size": { "description": "The number of term buckets that should be returned out of the overall terms list.", + "default": 10.0, "type": "number" }, "terms": { @@ -57717,6 +57958,7 @@ "properties": { "keyed": { "description": "By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array.\nSet to `false` to disable this behavior.", + "default": true, "type": "boolean" }, "values": { @@ -57772,6 +58014,7 @@ "properties": { "keyed": { "description": "By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array.\nSet to `false` to disable this behavior.", + "default": true, "type": "boolean" }, "percents": { @@ -57865,6 +58108,7 @@ }, "max_doc_count": { "description": "The maximum number of documents a term should appear in.", + "default": 
1.0, "type": "number" }, "missing": { @@ -57872,6 +58116,7 @@ }, "precision": { "description": "The precision of the internal CuckooFilters.\nSmaller precision leads to better approximation, but higher memory usage.", + "default": 0.001, "type": "number" }, "value_type": { @@ -57959,6 +58204,7 @@ "properties": { "shard_size": { "description": "Limits how many top-scoring documents are collected in the sample processed on each shard.", + "default": 100.0, "type": "number" } } @@ -58046,6 +58292,7 @@ }, "min_doc_count": { "description": "Only return terms that are found in more than `min_doc_count` hits.", + "default": 3.0, "type": "number" }, "mutual_information": { @@ -58173,6 +58420,7 @@ }, "min_doc_count": { "description": "Only return values that are found in more than `min_doc_count` hits.", + "default": 3.0, "type": "number" }, "mutual_information": { @@ -58233,6 +58481,7 @@ "properties": { "show_distribution": { "description": "Shows the probability distribution for all characters.", + "default": false, "type": "boolean" } } @@ -58284,6 +58533,7 @@ }, "min_doc_count": { "description": "Only return values that are found in more than `min_doc_count` hits.", + "default": 1.0, "type": "number" }, "missing": { @@ -58319,6 +58569,7 @@ }, "size": { "description": "The number of buckets returned out of the overall terms list.", + "default": 10.0, "type": "number" }, "format": { @@ -58338,6 +58589,7 @@ "properties": { "size": { "description": "The maximum number of results to return.", + "default": 10000.0, "type": "number" }, "keyed": { @@ -58365,6 +58617,7 @@ }, "explain": { "description": "If `true`, returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "fields": { @@ -58376,6 +58629,7 @@ }, "from": { "description": "Starting document offset.", + "default": 0.0, "type": "number" }, "highlight": { @@ -58390,6 +58644,7 @@ }, "size": { "description": "The maximum number of top matching hits to return per bucket.", + "default": 3.0, "type": "number" }, "sort": { @@ -58403,10 +58658,12 @@ }, "track_scores": { "description": "If `true`, calculates and returns document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If `true`, returns document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -58487,6 +58744,7 @@ }, "size": { "description": "The number of top documents from which to return metrics.", + "default": 1.0, "type": "number" }, "sort": { @@ -58580,6 +58838,7 @@ }, "buckets": { "description": "The target number of buckets.", + "default": 10.0, "type": "number" }, "shard_size": { @@ -58785,6 +59044,7 @@ }, "require_alias": { "description": "If `true`, the request's actions must target an index alias.", + "default": false, "type": "boolean" } } @@ -58837,6 +59097,7 @@ "properties": { "require_alias": { "description": "If `true`, the request's actions must target an index alias.", + "default": false, "type": "boolean" }, "retry_on_conflict": { @@ -58862,6 +59123,7 @@ "properties": { "detect_noop": { "description": "If true, the `result` in the response is set to 'noop' when no changes to the document occur.", + "default": true, "type": "boolean" }, "doc": { @@ -58870,6 +59132,7 @@ }, "doc_as_upsert": { "description": "Set to `true` to use the contents of `doc` as the value of `upsert`.", + "default": false, "type": "boolean" }, "script": { @@ -58877,6 +59140,7 @@ }, "scripted_upsert": { "description": "Set to 
`true` to run the script whether or not the document exists.", + "default": false, "type": "boolean" }, "_source": { @@ -61926,6 +62190,226 @@ } } }, + "cat._types.CatShardColumns": { + "oneOf": [ + { + "$ref": "#/components/schemas/cat._types.CatShardColumn" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/cat._types.CatShardColumn" + } + } + ] + }, + "cat._types.CatShardColumn": { + "anyOf": [ + { + "type": "string", + "enum": [ + "completion.size", + "cs", + "completionSize", + "dataset.size", + "dense_vector.value_count", + "dvc", + "denseVectorCount", + "docs", + "d", + "dc", + "fielddata.evictions", + "fe", + "fielddataEvictions", + "fielddata.memory_size", + "fm", + "fielddataMemory", + "flush.total", + "ft", + "flushTotal", + "flush.total_time", + "ftt", + "flushTotalTime", + "get.current", + "gc", + "getCurrent", + "get.exists_time", + "geti", + "getExistsTime", + "get.exists_total", + "geto", + "getExistsTotal", + "get.missing_time", + "gmti", + "getMissingTime", + "get.missing_total", + "gmto", + "getMissingTotal", + "get.time", + "gti", + "getTime", + "get.total", + "gto", + "getTotal", + "id", + "index", + "i", + "idx", + "indexing.delete_current", + "idc", + "indexingDeleteCurrent", + "indexing.delete_time", + "idti", + "indexingDeleteTime", + "indexing.delete_total", + "idto", + "indexingDeleteTotal", + "indexing.index_current", + "iic", + "indexingIndexCurrent", + "indexing.index_failed_due_to_version_conflict", + "iifvc", + "indexingIndexFailedDueToVersionConflict", + "indexing.index_failed", + "iif", + "indexingIndexFailed", + "indexing.index_time", + "iiti", + "indexingIndexTime", + "indexing.index_total", + "iito", + "indexingIndexTotal", + "ip", + "merges.current", + "mc", + "mergesCurrent", + "merges.current_docs", + "mcd", + "mergesCurrentDocs", + "merges.current_size", + "mcs", + "mergesCurrentSize", + "merges.total", + "mt", + "mergesTotal", + "merges.total_docs", + "mtd", + "mergesTotalDocs", + "merges.total_size", + "mts", + "mergesTotalSize", + "merges.total_time", + "mtt", + "mergesTotalTime", + "node", + "n", + "prirep", + "p", + "pr", + "primaryOrReplica", + "query_cache.evictions", + "qce", + "queryCacheEvictions", + "query_cache.memory_size", + "qcm", + "queryCacheMemory", + "recoverysource.type", + "rs", + "refresh.time", + "rti", + "refreshTime", + "refresh.total", + "rto", + "refreshTotal", + "search.fetch_current", + "sfc", + "searchFetchCurrent", + "search.fetch_time", + "sfti", + "searchFetchTime", + "search.fetch_total", + "sfto", + "searchFetchTotal", + "search.open_contexts", + "so", + "searchOpenContexts", + "search.query_current", + "sqc", + "searchQueryCurrent", + "search.query_time", + "sqti", + "searchQueryTime", + "search.query_total", + "sqto", + "searchQueryTotal", + "search.scroll_current", + "scc", + "searchScrollCurrent", + "search.scroll_time", + "scti", + "searchScrollTime", + "search.scroll_total", + "scto", + "searchScrollTotal", + "segments.count", + "sc", + "segmentsCount", + "segments.fixed_bitset_memory", + "sfbm", + "fixedBitsetMemory", + "segments.index_writer_memory", + "siwm", + "segmentsIndexWriterMemory", + "segments.memory", + "sm", + "segmentsMemory", + "segments.version_map_memory", + "svmm", + "segmentsVersionMapMemory", + "seq_no.global_checkpoint", + "sqg", + "globalCheckpoint", + "seq_no.local_checkpoint", + "sql", + "localCheckpoint", + "seq_no.max", + "sqm", + "maxSeqNo", + "shard", + "s", + "sh", + "dsparse_vector.value_count", + "svc", + "sparseVectorCount", + "state", + "st", + "store", + 
"sto", + "suggest.current", + "suc", + "suggestCurrent", + "suggest.time", + "suti", + "suggestTime", + "suggest.total", + "suto", + "suggestTotal", + "sync_id", + "unassigned.at", + "ua", + "unassigned.details", + "ud", + "unassigned.for", + "uf", + "unassigned.reason", + "ur" + ] + }, + { + "type": "string" + } + ] + }, "cat.shards.ShardsRecord": { "type": "object", "properties": { @@ -62517,6 +63001,69 @@ } } }, + "cat._types.CatThreadPoolColumns": { + "oneOf": [ + { + "$ref": "#/components/schemas/cat._types.CatThreadPoolColumn" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/cat._types.CatThreadPoolColumn" + } + } + ] + }, + "cat._types.CatThreadPoolColumn": { + "anyOf": [ + { + "type": "string", + "enum": [ + "active", + "a", + "completed", + "c", + "core", + "cr", + "ephemeral_id", + "eid", + "host", + "h", + "ip", + "i", + "keep_alive", + "k", + "largest", + "l", + "max", + "mx", + "name", + "node_id", + "id", + "node_name", + "pid", + "p", + "pool_size", + "psz", + "port", + "po", + "queue", + "q", + "queue_size", + "qs", + "rejected", + "r", + "size", + "sz", + "type", + "t" + ] + }, + { + "type": "string" + } + ] + }, "cat.thread_pool.ThreadPoolRecord": { "type": "object", "properties": { @@ -62910,6 +63457,7 @@ "$ref": "#/components/schemas/indices._types.IndexSegmentSort" }, "number_of_shards": { + "default": "1", "x-state": "Generally available", "oneOf": [ { @@ -62921,6 +63469,7 @@ ] }, "number_of_replicas": { + "default": "0", "x-state": "Generally available", "oneOf": [ { @@ -62938,15 +63487,18 @@ "$ref": "#/components/schemas/indices._types.IndexCheckOnStartup" }, "codec": { + "default": "LZ4", "type": "string" }, "routing_partition_size": { "$ref": "#/components/schemas/_spec_utils.Stringifiedinteger" }, "load_fixed_bitset_filters_eagerly": { + "default": true, "type": "boolean" }, "hidden": { + "default": "false", "oneOf": [ { "type": "boolean" @@ -62957,6 +63509,7 @@ ] }, "auto_expand_replicas": { + "default": "false", "oneOf": [ { "type": "string" @@ -62976,24 +63529,31 @@ "$ref": "#/components/schemas/_types.Duration" }, "max_result_window": { + "default": 10000.0, "type": "number" }, "max_inner_result_window": { + "default": 100.0, "type": "number" }, "max_rescore_window": { + "default": 10000.0, "type": "number" }, "max_docvalue_fields_search": { + "default": 100.0, "type": "number" }, "max_script_fields": { + "default": 32.0, "type": "number" }, "max_ngram_diff": { + "default": 1.0, "type": "number" }, "max_shingle_diff": { + "default": 3.0, "type": "number" }, "blocks": { @@ -63009,9 +63569,11 @@ "$ref": "#/components/schemas/indices._types.SettingsHighlight" }, "max_terms_count": { + "default": 65536.0, "type": "number" }, "max_regex_length": { + "default": 1000.0, "type": "number" }, "routing": { @@ -63124,6 +63686,7 @@ "properties": { "enabled": { "description": "Indicates whether soft deletes are enabled on the index.", + "default": true, "type": "boolean" }, "retention_lease": { @@ -63364,6 +63927,7 @@ "type": "object", "properties": { "max_analyzed_offset": { + "default": 1000000.0, "type": "number" } } @@ -63473,6 +64037,7 @@ }, "origination_date": { "description": "If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting\nif you create a new index that contains old data and want to use the original creation date to calculate the index\nage. 
Specified as a Unix epoch value in milliseconds.", + "default": 0.0, "type": "number" }, "parse_origination_date": { @@ -63484,10 +64049,12 @@ }, "rollover_alias": { "description": "The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action.\nWhen the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more\ninformation about rolling indices, see Rollover.", + "default": "", "type": "string" }, "prefer_ilm": { "description": "Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are\napplicable for an index).", + "default": "true", "oneOf": [ { "type": "boolean" @@ -63833,6 +64400,7 @@ }, "max_output_size": { "description": "The maximum token size to emit. Tokens larger than this size will be discarded.\nDefaults to `255`", + "default": 255.0, "type": "number" }, "separator": { @@ -63923,10 +64491,12 @@ }, "lowercase": { "description": "Should terms be lowercased or not.\nDefaults to `true`.", + "default": true, "type": "boolean" }, "pattern": { "description": "A Java regular expression.\nDefaults to `\\W+`.", + "default": "\\W+", "type": "string" }, "stopwords": { @@ -63969,6 +64539,7 @@ }, "max_token_length": { "description": "The maximum token length. If a token is seen that exceeds this length then it is split at `max_token_length` intervals.\nDefaults to `255`.", + "default": 255.0, "type": "number" }, "stopwords": { @@ -68033,6 +68604,7 @@ ] }, "buffer_size": { + "default": 256.0, "type": "number" } }, @@ -68108,6 +68680,7 @@ "type": "number" }, "token_chars": { + "default": [], "type": "array", "items": { "$ref": "#/components/schemas/_types.analysis.TokenChar" @@ -68754,6 +69327,7 @@ "properties": { "limit": { "description": "The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit.\nThe limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance\ndegradations and memory issues, especially in clusters with a high load or few resources.", + "default": "1000", "oneOf": [ { "type": "number" @@ -68765,6 +69339,7 @@ }, "ignore_dynamic_beyond_limit": { "description": "This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set\nto false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail\nwith the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail.\nInstead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false.\nThe fields that were not added to the mapping will be added to the _ignored field.", + "default": "false", "oneOf": [ { "type": "boolean" @@ -68781,6 +69356,7 @@ "properties": { "limit": { "description": "The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined\nat the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc.", + "default": 20.0, "type": "number" } } @@ -68790,6 +69366,7 @@ "properties": { "limit": { "description": "The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when\narrays of objects need to be queried independently of each other. 
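The analysis hunks above record defaults for the `pattern` analyzer (`lowercase: true`, `pattern: \W+`) and the standard analyzer's `max_token_length: 255`. A sketch of a custom `pattern` analyzer that overrides the pattern default (analyzer and index names are assumptions):

```rust
use serde_json::json;

fn main() {
    // Omitting `pattern` would fall back to the recorded default \W+;
    // here a comma splits CSV-like input instead.
    let body = json!({
        "settings": {
            "analysis": {
                "analyzer": {
                    "csv_words": {
                        "type": "pattern",
                        "pattern": ",",
                        "lowercase": true
                    }
                }
            }
        }
    });
    println!("PUT /my-index\n{}", serde_json::to_string_pretty(&body).unwrap());
}
```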
To safeguard against poorly designed mappings, this\nsetting limits the number of unique nested types per index.", + "default": 50.0, "type": "number" } } @@ -68799,6 +69376,7 @@ "properties": { "limit": { "description": "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps\nto prevent out of memory errors when a document contains too many nested objects.", + "default": 10000.0, "type": "number" } } @@ -71008,6 +71586,7 @@ }, "index": { "description": "If `true`, you can search this field using the kNN search API.", + "default": true, "type": "boolean" }, "index_options": { @@ -71040,10 +71619,12 @@ }, "ef_construction": { "description": "The number of candidates to track while assembling the list of nearest neighbors for each new node.\n\nOnly applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types.", + "default": 100.0, "type": "number" }, "m": { "description": "The number of neighbors each node will be connected to in the HNSW graph.\n\nOnly applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types.", + "default": 16.0, "type": "number" }, "type": { @@ -71803,6 +72384,7 @@ }, "time_series_dimension": { "description": "For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.", + "default": false, "x-state": "Technical preview", "type": "boolean" } @@ -72343,6 +72925,7 @@ }, "is_write_index": { "description": "If `true`, the index is the write index for the alias.", + "default": false, "type": "boolean" }, "routing": { @@ -72355,6 +72938,7 @@ }, "is_hidden": { "description": "If `true`, the alias is hidden.\nAll indices for the alias must have the same `is_hidden` value.", + "default": false, "x-state": "Generally available; Added in 7.16.0", "type": "boolean" } @@ -72423,6 +73007,7 @@ }, "enabled": { "description": "If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. 
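The `dense_vector` hunks above record `index: true` plus the HNSW defaults `ef_construction: 100` and `m: 16`. A minimal mapping sketch that tunes both (field name and dimension count are illustrative):

```rust
use serde_json::json;

fn main() {
    // A denser graph (m) and a larger candidate list (ef_construction)
    // trade indexing cost for recall; omitted options keep the recorded defaults.
    let body = json!({
        "mappings": {
            "properties": {
                "embedding": {
                    "type": "dense_vector",
                    "dims": 384,
                    "index": true,
                    "index_options": {
                        "type": "int8_hnsw",
                        "m": 32,
                        "ef_construction": 200
                    }
                }
            }
        }
    });
    println!("PUT /my-index\n{}", serde_json::to_string_pretty(&body).unwrap());
}
```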
A data stream lifecycle\nthat's disabled (enabled: `false`) will have no effect on the data stream.", + "default": true, "type": "boolean" } } @@ -73149,10 +73734,12 @@ }, "is_hidden": { "description": "If `true`, the alias is hidden.\nAll indices for the alias must have the same `is_hidden` value.", + "default": false, "type": "boolean" }, "is_write_index": { "description": "If `true`, the index is the write index for the alias.", + "default": false, "type": "boolean" }, "routing": { @@ -77267,14 +77854,17 @@ }, "min_doc_count": { "description": "Specifies how many documents must contain a pair of terms before it is considered to be a useful connection.\nThis setting acts as a certainty threshold.", + "default": 3.0, "type": "number" }, "shard_min_doc_count": { "description": "Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration.", + "default": 2.0, "type": "number" }, "size": { "description": "Specifies the maximum number of vertex terms returned for each field.", + "default": 5.0, "type": "number" } }, @@ -77304,6 +77894,7 @@ }, "sample_size": { "description": "Each hop considers a sample of the best-matching documents on each shard.\nUsing samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms.\nVery small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms.\nVery large sample sizes can dilute the quality of the results and increase execution times.", + "default": 100.0, "type": "number" }, "timeout": { @@ -78689,6 +79280,7 @@ }, "remove_index_blocks": { "description": "If index blocks should be removed when creating destination index (optional)", + "default": true, "type": "boolean" } } @@ -79149,10 +79741,12 @@ "properties": { "hidden": { "description": "If true, the data stream is hidden.", + "default": false, "type": "boolean" }, "allow_custom_routing": { "description": "If true, the data stream supports custom routing.", + "default": false, "type": "boolean" } } @@ -80984,6 +81578,7 @@ }, "is_hidden": { "description": "If `true`, the alias is hidden.", + "default": false, "type": "boolean" }, "is_write_index": { @@ -80998,6 +81593,7 @@ }, "must_exist": { "description": "If `true`, the alias must exist to perform the action.", + "default": false, "type": "boolean" } } @@ -81030,6 +81626,7 @@ }, "must_exist": { "description": "If `true`, the alias must exist to perform the action.", + "default": false, "type": "boolean" } } @@ -81045,6 +81642,7 @@ }, "must_exist": { "description": "If `true`, the alias must exist to perform the action.", + "default": false, "type": "boolean" } } @@ -81414,18 +82012,22 @@ "properties": { "max_chunk_size": { "description": "The maximum size of a chunk in words.\nThis value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).", + "default": 250.0, "type": "number" }, "overlap": { "description": "The number of overlapping words for chunks.\nIt is applicable only to a `word` chunking strategy.\nThis value cannot be higher than half the `max_chunk_size` value.", + "default": 100.0, "type": "number" }, "sentence_overlap": { "description": "The number of overlapping sentences for chunks.\nIt is applicable only for a `sentence` chunking strategy.\nIt can be either `1` or `0`.", + "default": 1.0, "type": "number" }, "strategy": { "description": "The chunking strategy: `sentence` or `word`.", + "default": 
"sentence", "type": "string" } } @@ -81716,6 +82318,7 @@ "properties": { "max_new_tokens": { "description": "For a `completion` task, it sets the maximum number for the output tokens to be generated.", + "default": 64.0, "type": "number" }, "temperature": { @@ -81908,6 +82511,7 @@ }, "max_new_tokens": { "description": "For a `completion` task, provide a hint for the maximum number of output tokens to be generated.", + "default": 64.0, "type": "number" }, "temperature": { @@ -82226,6 +82830,7 @@ "properties": { "enabled": { "description": "Turn on `adaptive_allocations`.", + "default": false, "type": "boolean" }, "max_number_of_allocations": { @@ -82243,6 +82848,7 @@ "properties": { "return_documents": { "description": "For a `rerank` task, return the document instead of only the index.", + "default": true, "type": "boolean" } } @@ -82777,6 +83383,7 @@ }, "url": { "description": "The URL endpoint to use for the requests.\nIt can be changed for testing purposes.", + "default": "https://api.openai.com/v1/embeddings.", "type": "string" } }, @@ -82879,6 +83486,7 @@ }, "return_documents": { "description": "Whether to return the source documents in the response.\nOnly for the `rerank` task type.", + "default": false, "type": "boolean" }, "top_k": { @@ -82887,6 +83495,7 @@ }, "truncation": { "description": "Whether to truncate the input texts to fit within the context length.", + "default": true, "type": "boolean" } } @@ -83346,6 +83955,7 @@ }, "deprecated": { "description": "Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.", + "default": false, "type": "boolean" }, "_meta": { @@ -83522,6 +84132,7 @@ }, "allow_duplicates": { "description": "If `false`, the processor does not append values already present in the field.", + "default": true, "type": "boolean" } }, @@ -83572,10 +84183,12 @@ }, "ignore_missing": { "description": "If `true` and field does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "indexed_chars": { "description": "The number of chars being used for extraction to prevent huge fields.\nUse `-1` for no limit.", + "default": 100000.0, "type": "number" }, "indexed_chars_field": { @@ -83593,6 +84206,7 @@ }, "remove_binary": { "description": "If true, the binary field will be removed from the document", + "default": false, "type": "boolean" }, "resource_name": { @@ -83619,6 +84233,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -83648,6 +84263,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "shape_type": { @@ -83709,10 +84325,12 @@ }, "seed": { "description": "Seed for the community ID hash. Must be between 0 and 65535 (inclusive). 
The\nseed can prevent hash collisions between network domains, such as a staging\nand production network that use the same addressing scheme.", + "default": 0.0, "type": "number" }, "ignore_missing": { "description": "If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.", + "default": true, "type": "boolean" } } @@ -83732,6 +84350,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -83782,10 +84401,12 @@ }, "quote": { "description": "Quote used in CSV, has to be single character string.", + "default": "\"", "type": "string" }, "separator": { "description": "Separator used in CSV, has to be single character string.", + "default": ",", "type": "string" }, "target_fields": { @@ -83823,6 +84444,7 @@ }, "locale": { "description": "The locale to use when parsing the date, relevant when parsing month names or week days.\nSupports template snippets.", + "default": "ENGLISH", "type": "string" }, "target_field": { @@ -83830,10 +84452,12 @@ }, "timezone": { "description": "The timezone to use when parsing the date.\nSupports template snippets.", + "default": "UTC", "type": "string" }, "output_format": { "description": "The format to use when writing the date to target_field. Must be a valid\njava time pattern.", + "default": "yyyy-MM-dd'T'HH:mm:ss.SSSXXX", "type": "string" } }, @@ -83868,6 +84492,7 @@ }, "index_name_format": { "description": "The format to be used when printing the parsed date into the index name.\nA valid java time pattern is expected here.\nSupports template snippets.", + "default": "yyyy-MM-dd", "type": "string" }, "index_name_prefix": { @@ -83876,10 +84501,12 @@ }, "locale": { "description": "The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.", + "default": "ENGLISH", "type": "string" }, "timezone": { "description": "The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.", + "default": "UTC", "type": "string" } }, @@ -83900,6 +84527,7 @@ "properties": { "append_separator": { "description": "The character(s) that separate the appended fields.", + "default": "\"\"", "type": "string" }, "field": { @@ -83907,6 +84535,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "pattern": { @@ -83934,6 +84563,7 @@ }, "override": { "description": "Controls the behavior when there is already an existing nested object that conflicts with the expanded field.\nWhen `false`, the processor will merge conflicts by combining the old and the new values into an array.\nWhen `true`, the value from the expanded field will overwrite the existing value.", + "default": false, "type": "boolean" }, "path": { @@ -83970,14 +84600,17 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "max_matches": { "description": "The maximum number of matched documents to include under the configured target field.\nThe `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object.\nIn order to avoid documents getting too large, 
the maximum allowed value is 128.", + "default": 1.0, "type": "number" }, "override": { "description": "If processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.", + "default": true, "type": "boolean" }, "policy_name": { @@ -84050,6 +84683,7 @@ }, "ignore_missing": { "description": "If true, the processor ignores any missing fields. If all fields are\nmissing, the processor silently exits without modifying the document.", + "default": false, "type": "boolean" } }, @@ -84082,6 +84716,7 @@ }, "ignore_missing": { "description": "If `true`, the processor silently exits without changing the document if the `field` is `null` or missing.", + "default": false, "type": "boolean" }, "processor": { @@ -84105,6 +84740,7 @@ "properties": { "database_file": { "description": "The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.", + "default": "GeoLite2-City.mmdb", "type": "string" }, "field": { @@ -84112,10 +84748,12 @@ }, "first_only": { "description": "If `true`, only the first found IP location data will be returned, even if the field contains an array.", + "default": true, "type": "boolean" }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "properties": { @@ -84171,6 +84809,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_format": { @@ -84209,6 +84848,7 @@ "properties": { "database_file": { "description": "The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.", + "default": "GeoLite2-City.mmdb", "type": "string" }, "field": { @@ -84216,10 +84856,12 @@ }, "first_only": { "description": "If `true`, only the first found geoip data will be returned, even if the field contains an array.", + "default": true, "type": "boolean" }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "properties": { @@ -84253,6 +84895,7 @@ "properties": { "ecs_compatibility": { "description": "Must be disabled or v1. 
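The geoip hunk above records `database_file` defaulting to `GeoLite2-City.mmdb` and `first_only` to `true`. A minimal pipeline sketch (pipeline name and field paths are assumptions):

```rust
use serde_json::json;

fn main() {
    // Omitting `database_file` uses the recorded GeoLite2-City.mmdb default;
    // `ignore_missing: true` overrides its recorded default of false.
    let body = json!({
        "processors": [
            {
                "geoip": {
                    "field": "source.ip",
                    "target_field": "source.geo",
                    "first_only": true,
                    "ignore_missing": true
                }
            }
        ]
    });
    println!("PUT _ingest/pipeline/geo\n{}", serde_json::to_string_pretty(&body).unwrap());
}
```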
If v1, the processor uses patterns with Elastic\nCommon Schema (ECS) field names.", + "default": "disabled", "type": "string" }, "field": { @@ -84260,6 +84903,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "pattern_definitions": { @@ -84278,6 +84922,7 @@ }, "trace_match": { "description": "When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched.", + "default": false, "type": "boolean" } }, @@ -84304,6 +84949,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "pattern": { @@ -84339,6 +84985,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document,", + "default": false, "type": "boolean" }, "target_field": { @@ -84421,6 +85068,7 @@ }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" } } @@ -84430,10 +85078,12 @@ "properties": { "num_top_classes": { "description": "Specifies the number of top class predictions to return.", + "default": 0.0, "type": "number" }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" }, "results_field": { @@ -84499,6 +85149,7 @@ "properties": { "add_to_root": { "description": "Flag that forces the parsed JSON to be added at the top level of the document.\n`target_field` must not be set when this option is chosen.", + "default": false, "type": "boolean" }, "add_to_root_conflict_strategy": { @@ -84506,6 +85157,7 @@ }, "allow_duplicate_keys": { "description": "When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys.\nInstead, the last encountered value for any duplicate key wins.", + "default": false, "type": "boolean" }, "field": { @@ -84552,6 +85204,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "include_keys": { @@ -84563,10 +85216,12 @@ }, "prefix": { "description": "Prefix to be added to extracted keys.", + "default": "null", "type": "string" }, "strip_brackets": { "description": "If `true`. 
strip brackets `()`, `<>`, `[]` as well as quotes `'` and `\"` from extracted values.", + "default": false, "type": "boolean" }, "target_field": { @@ -84606,6 +85261,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -84647,6 +85303,7 @@ }, "ignore_missing": { "description": "If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.", + "default": true, "type": "boolean" } } @@ -84666,6 +85323,7 @@ }, "ignore_missing_pipeline": { "description": "Whether to ignore missing pipelines instead of failing.", + "default": false, "type": "boolean" } }, @@ -84701,22 +85359,27 @@ }, "prefix": { "description": "Start a redacted section with this token", + "default": "<", "type": "string" }, "suffix": { "description": "End a redacted section with this token", + "default": ">", "type": "string" }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "skip_if_unlicensed": { "description": "If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document", + "default": false, "type": "boolean" }, "trace_redact": { "description": "If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted", + "default": false, "x-state": "Generally available; Added in 8.16.0", "type": "boolean" } @@ -84744,6 +85407,7 @@ }, "ignore_missing": { "description": "If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.", + "default": true, "type": "boolean" } }, @@ -84769,6 +85433,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" } }, @@ -84791,6 +85456,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -84892,6 +85558,7 @@ }, "ignore_empty_value": { "description": "If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "media_type": { @@ -84900,6 +85567,7 @@ }, "override": { "description": "If `true` processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.", + "default": true, "type": "boolean" }, "value": { @@ -84975,10 +85643,12 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "preserve_trailing": { "description": "Preserves empty trailing fields, if any.", + "default": false, "type": "boolean" }, "separator": { @@ -85019,6 +85689,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -85044,6 +85715,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits 
without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -85069,6 +85741,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -85094,14 +85767,17 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "keep_original": { "description": "If `true`, the processor copies the unparsed URI to `.original`.", + "default": true, "type": "boolean" }, "remove_if_successful": { "description": "If `true`, the processor removes the `field` after parsing the URI string.\nIf parsing fails, the processor does not remove the `field`.", + "default": false, "type": "boolean" }, "target_field": { @@ -85127,6 +85803,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "regex_file": { @@ -85138,6 +85815,18 @@ }, "properties": { "description": "Controls what properties are added to `target_field`.", + "default": [ + "name", + "major", + "minor", + "patch", + "build", + "os", + "os_name", + "os_major", + "os_minor", + "device" + ], "type": "array", "items": { "$ref": "#/components/schemas/ingest._types.UserAgentProperty" @@ -85145,6 +85834,7 @@ }, "extract_device_type": { "description": "Extracts device type from the user agent string on a best-effort basis.", + "default": false, "x-state": "Beta; Added in 8.9.0", "type": "boolean" } @@ -85855,6 +86545,7 @@ }, "use_null": { "description": "Defines whether a new series is used as the null series when there is no value for the by or partition fields.", + "default": false, "type": "boolean" } } @@ -85864,6 +86555,9 @@ "properties": { "actions": { "description": "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.", + "default": [ + "skip_result" + ], "type": "array", "items": { "$ref": "#/components/schemas/ml._types.RuleAction" @@ -86521,6 +87215,7 @@ }, "num_top_classes": { "description": "Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories.", + "default": 2.0, "type": "number" } } @@ -86544,6 +87239,7 @@ }, "early_stopping_enabled": { "description": "Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable.", + "default": true, "type": "boolean" }, "eta": { @@ -86583,6 +87279,7 @@ }, "num_top_feature_importance_values": { "description": "Advanced configuration option. Specifies the maximum number of feature importance values per document to return. 
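The ML rule hunk above records `actions` defaulting to `["skip_result"]`. A sketch of a custom detection rule that also skips model updates (the condition values are illustrative, not from this diff):

```rust
use serde_json::json;

fn main() {
    // If `actions` were omitted, only "skip_result" would apply per the
    // recorded default; the condition suppresses anomalies below 100.
    let rule = json!({
        "actions": ["skip_result", "skip_model_update"],
        "conditions": [
            { "applies_to": "actual", "operator": "lt", "value": 100.0 }
        ]
    });
    println!("{}", serde_json::to_string_pretty(&rule).unwrap());
}
```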
By default, no feature importance calculation occurs.", + "default": 0.0, "type": "number" }, "prediction_field_name": { @@ -86751,14 +87448,17 @@ "properties": { "compute_feature_influence": { "description": "Specifies whether the feature influence calculation is enabled.", + "default": true, "type": "boolean" }, "feature_influence_threshold": { "description": "The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1.", + "default": 0.1, "type": "number" }, "method": { "description": "The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score.", + "default": "ensemble", "type": "string" }, "n_neighbors": { @@ -86771,6 +87471,7 @@ }, "standardization_enabled": { "description": "If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`.", + "default": true, "type": "boolean" } } @@ -86785,6 +87486,7 @@ "properties": { "loss_function": { "description": "The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss).", + "default": "mse", "type": "string" }, "loss_function_parameter": { @@ -86854,10 +87556,12 @@ "properties": { "from": { "description": "Skips the specified number of items.", + "default": 0.0, "type": "number" }, "size": { "description": "Specifies the maximum number of items to obtain.", + "default": 10000.0, "type": "number" } } @@ -87004,10 +87708,12 @@ }, "skip_result": { "description": "When true the model will not create results for this calendar period.", + "default": true, "type": "boolean" }, "skip_model_update": { "description": "When true the model will not be updated for this calendar period.", + "default": true, "type": "boolean" }, "force_time_shift": { @@ -87407,10 +88113,12 @@ "properties": { "compute_feature_influence": { "description": "Specifies whether the feature influence calculation is enabled.", + "default": true, "type": "boolean" }, "feature_influence_threshold": { "description": "The minimum outlier score that a document needs to have in order to calculate its feature influence score.\nValue range: 0-1", + "default": 0.1, "type": "number" }, "method": { @@ -87427,6 +88135,7 @@ }, "standardization_enabled": { "description": "If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i).", + "default": true, "type": "boolean" } } @@ -87838,10 +88547,12 @@ }, "ignore_unavailable": { "description": "If true, missing or closed indices are not included in the response.", + "default": false, "type": "boolean" }, "ignore_throttled": { "description": "If true, concrete, expanded or aliased indices are ignored when frozen.", + "default": true, "type": "boolean" } } @@ -88252,6 +88963,7 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.\nIt specifies a period of time (in days) after which only the first snapshot per day is retained.\nThis period is relative to the timestamp of the most recent snapshot for this job.\nValid values range from 0 to `model_snapshot_retention_days`.", + 
"default": 1.0, "type": "number" }, "data_description": { @@ -88324,6 +89036,7 @@ "properties": { "categorization_examples_limit": { "description": "The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization.", + "default": 4.0, "type": "number" }, "model_memory_limit": { @@ -88369,6 +89082,7 @@ }, "time_format": { "description": "The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails.", + "default": "epoch", "type": "string" }, "field_delimiter": { @@ -88381,11 +89095,13 @@ "properties": { "annotations_enabled": { "description": "If true, enables calculation and storage of the model change annotations for each entity that is being analyzed.", + "default": true, "x-state": "Generally available; Added in 7.9.0", "type": "boolean" }, "enabled": { "description": "If true, enables calculation and storage of the model bounds for each entity that is being analyzed.", + "default": false, "type": "boolean" }, "terms": { @@ -89207,14 +89923,17 @@ "properties": { "do_lower_case": { "description": "Should the tokenizer lower case the text", + "default": false, "type": "boolean" }, "max_sequence_length": { "description": "Maximum input sequence length for the model", + "default": 512.0, "type": "number" }, "span": { "description": "Tokenization spanning options. Special value of -1 indicates no spanning takes place", + "default": -1.0, "type": "number" }, "truncate": { @@ -89222,6 +89941,7 @@ }, "with_special_tokens": { "description": "Is tokenization completed with special tokens", + "default": true, "type": "boolean" } } @@ -89245,6 +89965,7 @@ "properties": { "add_prefix_space": { "description": "Should the tokenizer prefix input with a space character", + "default": false, "type": "boolean" } } @@ -89281,6 +90002,7 @@ }, "hypothesis_template": { "description": "Hypothesis template used when tokenizing labels for prediction", + "default": "\"This example is {}.\"", "type": "string" }, "classification_labels": { @@ -89296,6 +90018,7 @@ }, "multi_label": { "description": "Indicates if more than one true label exists.", + "default": false, "type": "boolean" }, "labels": { @@ -90555,6 +91278,7 @@ }, "scroll_size": { "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default.", + "default": 1000.0, "type": "number" } } @@ -90564,6 +91288,7 @@ "properties": { "allow_lazy_open": { "description": "Advanced configuration option. 
Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node.", + "default": false, "type": "boolean" }, "analysis_config": { @@ -90580,6 +91305,7 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.\nIt specifies a period of time (in days) after which only the first snapshot per day is retained.\nThis period is relative to the timestamp of the most recent snapshot for this job.", + "default": 1.0, "type": "number" }, "data_description": { @@ -90611,6 +91337,7 @@ }, "model_snapshot_retention_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.\nIt specifies the maximum period of time (in days) that snapshots are retained.\nThis period is relative to the timestamp of the most recent snapshot for this job.\nThe default value is `10`, which means snapshots ten days older than the newest snapshot are deleted.", + "default": 10.0, "type": "number" }, "renormalization_window_days": { @@ -90741,6 +91468,7 @@ }, "use_null": { "description": "Defines whether a new series is used as the null series when there is no value for the by or partition fields.", + "default": false, "type": "boolean" } }, @@ -91184,6 +91912,7 @@ "properties": { "explain": { "description": "If `true`, returns detailed information about score calculation as part of each hit.", + "default": false, "type": "boolean" }, "id": { @@ -91198,6 +91927,7 @@ }, "profile": { "description": "If `true`, the query execution is profiled.", + "default": false, "type": "boolean" }, "source": { @@ -91223,6 +91953,7 @@ }, "field_statistics": { "description": "If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.", + "default": true, "type": "boolean" }, "filter": { @@ -91230,14 +91961,17 @@ }, "offsets": { "description": "If `true`, the response includes term offsets.", + "default": true, "type": "boolean" }, "payloads": { "description": "If `true`, the response includes term payloads.", + "default": true, "type": "boolean" }, "positions": { "description": "If `true`, the response includes term positions.", + "default": true, "type": "boolean" }, "routing": { @@ -91245,6 +91979,7 @@ }, "term_statistics": { "description": "If true, the response includes term frequency and document frequency.", + "default": false, "type": "boolean" }, "version": { @@ -91264,6 +91999,7 @@ }, "max_num_terms": { "description": "The maximum number of terms that must be returned per field.", + "default": 25.0, "type": "number" }, "max_term_freq": { @@ -91272,18 +92008,22 @@ }, "max_word_length": { "description": "The maximum word length above which words will be ignored.\nDefaults to unbounded.", + "default": 0.0, "type": "number" }, "min_doc_freq": { "description": "Ignore terms which do not occur in at least this many docs.", + "default": 1.0, "type": "number" }, "min_term_freq": { "description": "Ignore words with less than this frequency in the source doc.", + "default": 1.0, "type": "number" }, "min_word_length": { "description": "The minimum word length below which words will be ignored.", + "default": 0.0, "type": "number" } } @@ -94193,6 +94933,7 @@ "properties": { "ignore_unlabeled": { "description": "Controls how unlabeled documents in the search results are counted. 
If set to true, unlabeled documents are ignored and neither count as relevant or irrelevant. Set to false (the default), they are treated as irrelevant.", + "default": false, "type": "boolean" } } @@ -94209,6 +94950,7 @@ "properties": { "relevant_rating_threshold": { "description": "Sets the rating threshold above which documents are considered to be \"relevant\".", + "default": 1.0, "type": "number" } } @@ -94220,6 +94962,7 @@ "properties": { "k": { "description": "Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query.", + "default": 10.0, "type": "number" } } @@ -94257,6 +95000,7 @@ "properties": { "normalize": { "description": "If set to true, this metric will calculate the Normalized DCG.", + "default": false, "type": "boolean" } } @@ -94416,6 +95160,7 @@ }, "size": { "description": "The number of documents to index per batch.\nUse it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB.", + "default": 1000.0, "type": "number" }, "slice": { @@ -95742,6 +96487,7 @@ }, "allow_restricted_indices": { "description": "Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -95891,6 +96637,7 @@ }, "allow_restricted_indices": { "description": "Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -96075,6 +96822,7 @@ }, "allow_restricted_indices": { "description": "This needs to be set to true if the patterns in the names field should cover system indices.", + "default": false, "type": "boolean" } }, @@ -96107,6 +96855,7 @@ }, "allow_restricted_indices": { "description": "Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -97229,10 +97978,12 @@ }, "other_bucket_key": { "description": "The key with which the other bucket is returned.", + "default": "_other_", "type": "string" }, "keyed": { "description": "By default, the named filters aggregation returns the buckets as an object.\nSet to `false` to return the buckets as an array of objects.", + "default": true, "type": "boolean" } } @@ -97783,6 +98534,7 @@ "properties": { "ignore_unavailable": { "description": "If false, the snapshot fails if any data stream or index in indices is missing or closed. 
If true, the snapshot ignores missing or closed data streams and indices.", + "default": false, "type": "boolean" }, "indices": { @@ -97790,6 +98542,7 @@ }, "include_global_state": { "description": "If true, the current global state is included in the snapshot.", + "default": true, "type": "boolean" }, "feature_states": { @@ -97804,6 +98557,7 @@ }, "partial": { "description": "If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available.", + "default": false, "type": "boolean" } } @@ -98134,26 +98888,32 @@ }, "client": { "description": "The name of the Azure repository client to use.", + "default": "default", "type": "string" }, "container": { "description": "The Azure container.", + "default": "elasticsearch-snapshots", "type": "string" }, "delete_objects_max_size": { "description": "The maximum batch size, between 1 and 256, used for `BlobBatch` requests.\nDefaults to 256 which is the maximum number supported by the Azure blob batch API.", + "default": 256.0, "type": "number" }, "location_mode": { "description": "Either `primary_only` or `secondary_only`.\nNote that if you set it to `secondary_only`, it will force `readonly` to `true`.", + "default": "primary_only", "type": "string" }, "max_concurrent_batch_deletes": { "description": "The maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`.\nNote that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits.\nDefaults to 10, minimum is 1, maximum is 100.", + "default": 10.0, "type": "number" }, "readonly": { "description": "If `true`, the repository is read-only.\nThe cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it.\n\nOnly a cluster with write access can create snapshots in the repository.\nAll other clusters connected to the repository should have the `readonly` parameter set to `true`.\nIf `false`, the cluster can write to the repository and create snapshots in it.\n\nIMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.\nHaving multiple clusters write to the repository at the same time risks corrupting the contents of the repository.", + "default": false, "type": "boolean" } } @@ -98168,6 +98928,7 @@ }, "compress": { "description": "When set to `true`, metadata files are stored in compressed format.\nThis setting doesn't affect index files that are already compressed by default.", + "default": true, "type": "boolean" }, "max_restore_bytes_per_sec": { @@ -98238,10 +98999,12 @@ }, "client": { "description": "The name of the client to use to connect to Google Cloud Storage.", + "default": "default", "type": "string" }, "readonly": { "description": "If `true`, the repository is read-only.\nThe cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it.\n\nOnly a cluster with write access can create snapshots in the repository.\nAll other clusters connected to the repository should have the `readonly` parameter set to `true`.\n\nIf `false`, the cluster can write to the repository and create snapshots in it.\n\nIMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.\nHaving multiple clusters write to the repository at the same time risks
corrupting the contents of the repository.", + "default": false, "type": "boolean" } }, @@ -98307,10 +99070,12 @@ "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl" }, "description": "The S3 repository supports all S3 canned ACLs: `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, `bucket-owner-full-control`.\nYou could specify a canned ACL using the `canned_acl` setting.\nWhen the S3 repository creates buckets and objects, it adds the canned ACL into the buckets and objects.", + "default": "private", "type": "string" }, "client": { "description": "The name of the S3 client to use to connect to S3.", + "default": "default", "type": "string" }, "delete_objects_max_size": { @@ -98318,6 +99083,7 @@ "url": "https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html" }, "description": "The maximum batch size, between 1 and 1000, used for `DeleteObjects` requests.\nDefaults to 1000 which is the maximum number supported by the AWS DeleteObjects API.", + "default": 1000.0, "type": "number" }, "get_register_retry_delay": { @@ -98325,6 +99091,7 @@ }, "max_multipart_parts": { "description": "The maximum number of parts that Elasticsearch will write during a multipart upload of a single object.\nFiles which are larger than `buffer_size × max_multipart_parts` will be chunked into several smaller objects.\nElasticsearch may also split a file across multiple objects to satisfy other constraints such as the `chunk_size` limit.\nDefaults to `10000` which is the maximum number of parts in a multipart upload in AWS S3.", + "default": 10000.0, "type": "number" }, "max_multipart_upload_cleanup_size": { @@ -98332,14 +99099,17 @@ "url": "https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html" }, "description": "The maximum number of possibly-dangling multipart uploads to clean up in each batch of snapshot deletions.\nDefaults to 1000 which is the maximum number supported by the AWS ListMultipartUploads API.\nIf set to `0`, Elasticsearch will not attempt to clean up dangling multipart uploads.", + "default": 1000.0, "type": "number" }, "readonly": { "description": "If true, the repository is read-only.\nThe cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it.\n\nOnly a cluster with write access can create snapshots in the repository.\nAll other clusters connected to the repository should have the `readonly` parameter set to `true`.\n\nIf `false`, the cluster can write to the repository and create snapshots in it.\n\nIMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.\nHaving multiple clusters write to the repository at the same time risks corrupting the contents of the repository.", + "default": false, "type": "boolean" }, "server_side_encryption": { "description": "When set to `true`, files are encrypted on server side using an AES256 algorithm.", + "default": false, "type": "boolean" }, "storage_class": { @@ -98347,6 +99117,7 @@ "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/s3-repository#repository-s3-storage-classes" }, "description": "The S3 storage class for objects written to the repository.\nValues may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, and `intelligent_tiering`.", + "default": "standard", "type": "string" }, "throttled_delete_retry.delay_increment": { @@
-98409,10 +99180,12 @@ }, "max_number_of_snapshots": { "description": "The maximum number of snapshots the repository can contain.\nThe default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`.", + "default": 2147483647.0, "type": "number" }, "readonly": { "description": "If `true`, the repository is read-only.\nThe cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it.\n\nOnly a cluster with write access can create snapshots in the repository.\nAll other clusters connected to the repository should have the `readonly` parameter set to `true`.\n\nIf `false`, the cluster can write to the repository and create snapshots in it.\n\nIMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.\nHaving multiple clusters write to the repository at the same time risks corrupting the contents of the repository.", + "default": false, "type": "boolean" } }, @@ -98461,6 +99234,7 @@ "properties": { "http_max_retries": { "description": "The maximum number of retries for HTTP and HTTPS URLs.", + "default": 5.0, "type": "number" }, "http_socket_timeout": { @@ -98468,6 +99242,7 @@ }, "max_number_of_snapshots": { "description": "The maximum number of snapshots the repository can contain.\nThe default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`.", + "default": 2147483647.0, "type": "number" }, "url": { @@ -98524,10 +99299,12 @@ }, "max_number_of_snapshots": { "description": "The maximum number of snapshots the repository can contain.\nThe default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`.", + "default": 2147483647.0, "type": "number" }, "read_only": { "description": "If `true`, the repository is read-only.\nThe cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it.\n\nOnly a cluster with write access can create snapshots in the repository.\nAll other clusters connected to the repository should have the `readonly` parameter set to `true`.\n\nIf `false`, the cluster can write to the repository and create snapshots in it.\n\nIMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.\nHaving multiple clusters write to the repository at the same time risks corrupting the contents of the repository.", + "default": false, "type": "boolean" } } @@ -99504,14 +100281,17 @@ "properties": { "align_checkpoints": { "description": "Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align\ncheckpoint ranges with the date histogram interval when date histogram is specified as a group source in the\ntransform config. As a result, less document updates in the destination index will be performed thus improving\noverall performance.", + "default": true, "type": "boolean" }, "dates_as_epoch_millis": { "description": "Defines if dates in the output should be written as ISO formatted string or as millis since epoch. epoch_millis was\nthe default for transforms created before version 7.11.
For compatible output set this value to `true`.", + "default": false, "type": "boolean" }, "deduce_mappings": { "description": "Specifies whether the transform should deduce the destination index mappings from the transform configuration.", + "default": true, "type": "boolean" }, "docs_per_second": { @@ -99520,10 +100300,12 @@ }, "max_page_search_size": { "description": "Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker\nexceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`.", + "default": 500.0, "type": "number" }, "unattended": { "description": "If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case\nof an error which means the transform never fails. Setting the number of retries other than infinite fails in\nvalidation.", + "default": false, "x-state": "Generally available; Added in 8.5.0", "type": "boolean" } @@ -100238,6 +101020,7 @@ "type": "object", "properties": { "lang": { + "default": "painless", "type": "string" }, "params": { @@ -100310,6 +101093,7 @@ "type": "object", "properties": { "explain": { + "default": false, "type": "boolean" }, "id": { @@ -100322,6 +101106,7 @@ } }, "profile": { + "default": false, "type": "boolean" }, "source": { @@ -100637,6 +101422,7 @@ "type": "boolean" }, "retries": { + "default": 40.0, "type": "number" }, "interval": { @@ -106962,6 +107748,23 @@ "count", "api_keys" ] + }, + "examples": { + "QueryApiKeysResponseExample1": { + "summary": "Query API keys by ID", + "description": "A successful response from `GET /_security/_query/api_key?with_limited_by=true`. The `limited_by` details are the owner user's permissions associated with the API key. It is a point-in-time snapshot captured at creation and subsequent updates. An API key's effective permissions are an intersection of its assigned privileges and the owner user's permissions.\n", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\",\n \"name\": \"application-key-1\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"my-application\"\n },\n \"role_descriptors\": { },\n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample2": { + "summary": "Query API keys with pagination", + "description": "An abbreviated response from `GET /_security/_query/api_key` that contains a list of matched API keys along with their sort values. The first sort value is creation time, which is displayed in `date_time` format. 
The second sort value is the API key name.\n", + "value": "{\n \"total\": 100,\n \"count\": 10,\n \"api_keys\": [\n {\n \"id\": \"CLXgVnsBOGkf8IyjcXU7\",\n \"name\": \"app1-key-79\",\n \"creation\": 1629250154811,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:14.811Z\", \n \"app1-key-79\" \n ]\n },\n {\n \"id\": \"BrXgVnsBOGkf8IyjbXVB\",\n \"name\": \"app1-key-78\",\n \"creation\": 1629250153794,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:13.794Z\",\n \"app1-key-78\"\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample3": { + "summary": "Query all API keys", + "description": "A successful response from `GET /_security/_query/api_key`. It includes the role descriptors that are assigned to each API key when it was created or last updated. Note that an API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of the owner user's permissions. An empty role descriptors object means the API key inherits the owner user's permissions.\n", + "value": "{\n \"total\": 3,\n \"count\": 3,\n \"api_keys\": [ \n {\n \"id\": \"nkvrGXsB8w290t56q3Rg\",\n \"name\": \"my-api-key-1\",\n \"creation\": 1628227480421,\n \"expiration\": 1629091480421,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"realm_type\": \"reserved\",\n \"metadata\": {\n \"letter\": \"a\"\n },\n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"oEvrGXsB8w290t5683TI\",\n \"name\": \"my-api-key-2\",\n \"creation\": 1628227498953,\n \"expiration\": 1628313898953,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"metadata\": {\n \"letter\": \"b\"\n },\n \"role_descriptors\": { } \n }\n ]\n}" + } } } } @@ -109193,14 +109996,14 @@ "description": "List of columns to appear in the response. 
Supports simple wildcards.", "deprecated": false, "schema": { - "$ref": "#/components/schemas/_types.Names" + "$ref": "#/components/schemas/cat._types.CatShardColumns" }, "style": "form" }, "cat.shards-s": { "in": "query", "name": "s", - "description": "List of columns that determine how the table should be sorted.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", + "description": "A comma-separated list of column names or aliases that determines the sort order.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Names" @@ -109210,7 +110013,7 @@ "cat.shards-master_timeout": { "in": "query", "name": "master_timeout", - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Duration" @@ -109220,7 +110023,7 @@ "cat.shards-time": { "in": "query", "name": "time", - "description": "Unit used to display time values.", + "description": "The unit used to display time values.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.TimeUnit" @@ -109356,14 +110159,14 @@ "description": "List of columns to appear in the response. Supports simple wildcards.", "deprecated": false, "schema": { - "$ref": "#/components/schemas/_types.Names" + "$ref": "#/components/schemas/cat._types.CatThreadPoolColumns" }, "style": "form" }, "cat.thread_pool-s": { "in": "query", "name": "s", - "description": "List of columns that determine how the table should be sorted.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", + "description": "A comma-separated list of column names or aliases that determines the sort order.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Names" @@ -109393,7 +110196,7 @@ "cat.thread_pool-master_timeout": { "in": "query", "name": "master_timeout", - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.Duration" @@ -117773,6 +118576,7 @@ }, "explain": { "description": "If true, returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -117784,6 +118588,7 @@ }, "from": { "description": "Starting document offset. By default, you cannot page through more than 10,000\nhits using the from and size parameters. To page through more hits, use the\nsearch_after parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -117864,6 +118669,7 @@ }, "size": { "description": "The number of hits to return. By default, you cannot page through more\nthan 10,000 hits using the from and size parameters. To page through more\nhits, use the search_after parameter.", + "default": 10.0, "type": "number" }, "slice": { @@ -117887,6 +118693,7 @@ }, "terminate_after": { "description": "Maximum number of documents to collect for each shard. If a query reaches this\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\nbefore sorting. 
Defaults to 0, which does not terminate query execution early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -117895,10 +118702,12 @@ }, "track_scores": { "description": "If true, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If true, returns document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -118191,10 +119000,12 @@ }, "allow_partial_search_results": { "description": "Allow query execution also in case of shard failures.\nIf true, the query will keep running and will return results based on the available shards.\nFor sequences, the behavior can be further refined using allow_partial_sequence_results", + "default": true, "type": "boolean" }, "allow_partial_sequence_results": { "description": "This flag applies only to sequences and has effect only if allow_partial_search_results=true.\nIf true, the sequence query will return results based on the available shards, ignoring the others.\nIf false, the sequence query will return successfully, but will always have empty results.", + "default": false, "type": "boolean" }, "size": { @@ -118222,6 +119033,7 @@ }, "max_samples_per_key": { "description": "By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size`\nparameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the\n`max_samples_per_key` parameter. Pipes are not supported for sample queries.", + "default": 1.0, "type": "number" } }, @@ -118321,6 +119133,7 @@ }, "explain": { "description": "If true, returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -118332,6 +119145,7 @@ }, "from": { "description": "Starting document offset. By default, you cannot page through more than 10,000\nhits using the from and size parameters. To page through more hits, use the\nsearch_after parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -118397,6 +119211,7 @@ }, "size": { "description": "The number of hits to return. By default, you cannot page through more\nthan 10,000 hits using the from and size parameters. To page through more\nhits, use the search_after parameter.", + "default": 10.0, "type": "number" }, "slice": { @@ -118420,6 +119235,7 @@ }, "terminate_after": { "description": "Maximum number of documents to collect for each shard. If a query reaches this\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\nbefore sorting. 
Defaults to 0, which does not terminate query execution early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -118428,10 +119244,12 @@ }, "track_scores": { "description": "If true, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If true, returns document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -118540,6 +119358,7 @@ }, "explain": { "description": "If `true`, the response includes token attributes and additional details.", + "default": false, "type": "boolean" }, "field": { @@ -118782,6 +119601,7 @@ }, "numeric_detection": { "description": "Automatically map strings into numeric data types for all fields.", + "default": false, "type": "boolean" }, "properties": { @@ -119276,10 +120096,12 @@ }, "model_memory_limit": { "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to\ncreate data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "default": "1gb", "type": "string" }, "max_num_threads": { "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "default": 1.0, "type": "number" }, "analyzed_fields": { @@ -119287,6 +120109,7 @@ }, "allow_lazy_start": { "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node.", + "default": false, "type": "boolean" } } @@ -119308,10 +120131,12 @@ "properties": { "anomaly_score": { "description": "Refer to the description for the `anomaly_score` query parameter.", + "default": 0.0, "type": "number" }, "desc": { "description": "Refer to the description for the `desc` query parameter.", + "default": false, "type": "boolean" }, "end": { @@ -119319,10 +120144,12 @@ }, "exclude_interim": { "description": "Refer to the description for the `exclude_interim` query parameter.", + "default": false, "type": "boolean" }, "expand": { "description": "Refer to the description for the `expand` query parameter.", + "default": false, "type": "boolean" }, "page": { @@ -119407,6 +120234,7 @@ "properties": { "desc": { "description": "Refer to the description for the `desc` query parameter.", + "default": false, "type": "boolean" }, "end": { @@ -119440,6 +120268,7 @@ "properties": { "allow_no_match": { "description": "Refer to the description for the `allow_no_match` query parameter.", + "default": true, "type": "boolean" }, "bucket_span": { @@ -119450,6 +120279,7 @@ }, "exclude_interim": { "description": "Refer to the description for the `exclude_interim` query parameter.", + "default": false, "type": "boolean" }, "overall_score": { @@ -119468,6 +120298,7 @@ }, "top_n": { "description": "Refer to the description for the `top_n` query parameter.", + "default": 1.0, "type": "number" } } @@ -119489,6 +120320,7 @@ "properties": { "desc": { "description": "Refer to the description for the `desc` query parameter.", + "default": false, "type": "boolean" }, "end": { @@ -119496,6 +120328,7 @@ }, "exclude_interim": { "description": "Refer to the description for the 
`exclude_interim` query parameter.", + "default": false, "type": "boolean" }, "page": { @@ -119503,6 +120336,7 @@ }, "record_score": { "description": "Refer to the description for the `record_score` query parameter.", + "default": 0.0, "type": "number" }, "sort": { @@ -119872,6 +120706,7 @@ }, "explain": { "description": "If `true`, the request returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -119883,6 +120718,7 @@ }, "from": { "description": "The starting document offset, which must be non-negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -119946,6 +120782,7 @@ }, "profile": { "description": "Set to `true` to return detailed timing information about the execution of individual components in a search request.\nNOTE: This is a debugging tool and adds significant overhead to search execution.", + "default": false, "type": "boolean" }, "query": { @@ -119980,6 +120817,7 @@ }, "size": { "description": "The number of hits to return, which must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` property.", + "default": 10.0, "type": "number" }, "slice": { @@ -120003,6 +120841,7 @@ }, "terminate_after": { "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this property to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this property for requests that target data streams with backing indices across multiple data tiers.\n\nIf set to `0` (default), the query does not terminate early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -120011,10 +120850,12 @@ }, "track_scores": { "description": "If `true`, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If `true`, the request returns the document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -120101,14 +120942,17 @@ }, "buffer": { "description": "The size, in pixels, of a clipping buffer outside the tile. This allows renderers\nto avoid outline artifacts from geometries that extend past the extent of the tile.", + "default": 5.0, "type": "number" }, "exact_bounds": { "description": "If `false`, the meta layer's feature is the bounding box of the tile.\nIf `true`, the meta layer's feature is a bounding box resulting from a\n`geo_bounds` aggregation. The aggregation runs on values that intersect\nthe `//` tile with `wrap_longitude` set to `false`. The resulting\nbounding box may be larger than the vector tile.", + "default": false, "type": "boolean" }, "extent": { "description": "The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.", + "default": 4096.0, "type": "number" }, "fields": { @@ -120119,6 +120963,7 @@ }, "grid_precision": { "description": "Additional zoom levels available through the aggs layer. For example, if `` is `7`\nand `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. 
If 0, results\ndon't include the aggs layer.", + "default": 8.0, "type": "number" }, "grid_type": { @@ -120132,6 +120977,7 @@ }, "size": { "description": "The maximum number of features to return in the hits layer. Accepts 0-10000.\nIf 0, results don't include the hits layer.", + "default": 10000.0, "type": "number" }, "sort": { @@ -120163,6 +121009,7 @@ "properties": { "explain": { "description": "If `true`, returns detailed information about score calculation as part of each hit.\nIf you specify both this and the `explain` query parameter, the API uses only the query parameter.", + "default": false, "type": "boolean" }, "id": { @@ -120177,6 +121024,7 @@ }, "profile": { "description": "If `true`, the query execution is profiled.", + "default": false, "type": "boolean" }, "source": { @@ -120595,6 +121443,7 @@ }, "enabled": { "description": "Specifies whether the user is enabled.", + "default": true, "type": "boolean" } } @@ -120627,6 +121476,7 @@ }, "from": { "description": "The starting document offset.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "sort": { @@ -120634,6 +121484,7 @@ }, "size": { "description": "The number of hits to return.\nIt must not be negative.\nThe `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 10.0, "type": "number" }, "search_after": { @@ -120646,6 +121497,16 @@ "summary": "Query API keys by ID", "description": "Run `GET /_security/_query/api_key?with_limited_by=true` to retrieve an API key by ID.", "value": "{\n \"query\": {\n \"ids\": {\n \"values\": [\n \"VuaCfGcBCdbkQm-e5aOx\"\n ]\n }\n }\n}" + }, + "QueryApiKeysRequestExample2": { + "summary": "Query API keys with pagination", + "description": "Run `GET /_security/_query/api_key`. Use a `bool` query to issue complex logical conditions and use `from`, `size`, and `sort` to help paginate the result. For example, the API key name must begin with `app1-key-` and must not be `app1-key-01`. It must be owned by a username with the wildcard pattern `org-*-user` and the `environment` metadata field must have a `production` value. The offset to begin the search result is the twentieth (zero-based index) API key. The page size of the response is 10 API keys. 
The result is first sorted by creation date in descending order, then by name in ascending order.\n", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\" \n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\" \n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\" \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\" \n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\" \n }\n }\n ]\n }\n },\n \"from\": 20, \n \"size\": 10, \n \"sort\": [ \n { \"creation\": { \"order\": \"desc\", \"format\": \"date_time\" } },\n \"name\"\n ]\n}" + }, + "QueryApiKeysRequestExample3": { + "summary": "Query API keys by name", + "description": "Run `GET /_security/_query/api_key` to retrieve the API key by name.", + "value": "{\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n}" } } } @@ -120662,6 +121523,7 @@ }, "from": { "description": "The starting document offset.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "sort": { @@ -120669,6 +121531,7 @@ }, "size": { "description": "The number of hits to return.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 10.0, "type": "number" }, "search_after": { @@ -120702,6 +121565,7 @@ }, "from": { "description": "The starting document offset.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "sort": { @@ -120709,6 +121573,7 @@ }, "size": { "description": "The number of hits to return.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 10.0, "type": "number" }, "search_after": { @@ -120743,6 +121608,7 @@ }, "size": { "description": "The number of profiles to return.", + "default": 10.0, "type": "number" }, "data": { @@ -120894,10 +121760,12 @@ }, "ignore_unavailable": { "description": "If `true`, the request ignores data streams and indices in `indices` that are missing or closed.\nIf `false`, the request returns an error for any data stream or index that is missing or closed.", + "default": false, "type": "boolean" }, "include_global_state": { "description": "If `true`, the current cluster state is included in the snapshot.\nThe cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies.\nIt also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).", + "default": true, "type": "boolean" }, "indices": { @@ -120908,6 +121776,7 @@ }, "partial": { "description": "If `true`, it enables you to restore a partial snapshot of indices with unavailable shards.\nOnly shards that were successfully included in the snapshot will be restored.\nAll missing shards will be recreated as empty.\n\nIf `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.", + 
"default": false, "type": "boolean" } } @@ -120971,6 +121840,7 @@ "properties": { "allow_partial_search_results": { "description": "If `true`, the response has partial results when there are shard request timeouts or shard failures.\nIf `false`, the API returns an error with no partial results.", + "default": false, "type": "boolean" }, "catalog": { @@ -120982,6 +121852,7 @@ "url": "https://www.elastic.co/docs/explore-analyze/query-filter/languages/sql-rest-columnar" }, "description": "If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results.\nThe API supports this parameter only for CBOR, JSON, SMILE, and YAML responses.", + "default": false, "type": "boolean" }, "cursor": { @@ -120990,10 +121861,12 @@ }, "fetch_size": { "description": "The maximum number of rows (or entries) to return in one response.", + "default": 1000.0, "type": "number" }, "field_multi_value_leniency": { "description": "If `false`, the API returns an exception when encountering multiple values for a field.\nIf `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results.", + "default": false, "type": "boolean" }, "filter": { @@ -121001,6 +121874,7 @@ }, "index_using_frozen": { "description": "If `true`, the search can run on frozen indices.", + "default": false, "type": "boolean" }, "keep_alive": { @@ -121008,6 +121882,7 @@ }, "keep_on_completion": { "description": "If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter.\nIf `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`.", + "default": false, "type": "boolean" }, "page_timeout": { @@ -121059,6 +121934,7 @@ "properties": { "fetch_size": { "description": "The maximum number of rows (or entries) to return in one response.", + "default": 1000.0, "type": "number" }, "filter": { @@ -121098,6 +121974,7 @@ }, "size": { "description": "The number of matching terms to return.", + "default": 10.0, "type": "number" }, "timeout": { @@ -121105,6 +121982,7 @@ }, "case_insensitive": { "description": "When `true`, the provided search string is matched against index terms without case sensitivity.", + "default": false, "type": "boolean" }, "index_filter": { @@ -121157,22 +122035,27 @@ }, "field_statistics": { "description": "If `true`, the response includes:\n\n* The document count (how many documents contain this field).\n* The sum of document frequencies (the sum of document frequencies for all terms in this field).\n* The sum of total term frequencies (the sum of total term frequencies of each term in this field).", + "default": true, "type": "boolean" }, "offsets": { "description": "If `true`, the response includes term offsets.", + "default": true, "type": "boolean" }, "payloads": { "description": "If `true`, the response includes term payloads.", + "default": true, "type": "boolean" }, "positions": { "description": "If `true`, the response includes term positions.", + "default": true, "type": "boolean" }, "term_statistics": { "description": "If `true`, the response includes:\n\n* The total term frequency (how often a term occurs in all documents).\n* The document frequency (the number of documents containing the current term).\n\nBy default these values are not returned since term statistics can have a serious performance impact.", + "default": false, "type": "boolean" }, "routing": { @@ -121343,10 +122226,12 @@ }, 
"ignore_condition": { "description": "When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter.", + "default": false, "type": "boolean" }, "record_execution": { "description": "When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time.\nIn addition, the status of the watch is updated, possibly throttling subsequent runs.\nThis can also be specified as an HTTP parameter.", + "default": false, "type": "boolean" }, "simulated_actions": { @@ -121365,6 +122250,16 @@ "summary": "Run a watch", "description": "Run `POST _watcher/watch/my_watch/_execute` to run a watch. The input defined in the watch is ignored and the `alternative_input` is used as the payload. The condition as defined by the watch is ignored and is assumed to evaluate to true. The `force_simulate` action forces the simulation of `my-action`. Forcing the simulation means that throttling is ignored and the watch is simulated by Watcher instead of being run normally.\n", "value": "{\n \"trigger_data\" : { \n \"triggered_time\" : \"now\",\n \"scheduled_time\" : \"now\"\n },\n \"alternative_input\" : { \n \"foo\" : \"bar\"\n },\n \"ignore_condition\" : true, \n \"action_modes\" : {\n \"my-action\" : \"force_simulate\" \n },\n \"record_execution\" : true \n}" + }, + "WatcherExecuteRequestExample2": { + "summary": "Run a watch with multiple action modes", + "description": "Run `POST _watcher/watch/my_watch/_execute` and set a different mode for each action.\n", + "value": "{\n \"action_modes\" : {\n \"action1\" : \"force_simulate\",\n \"action2\" : \"skip\"\n }\n}" + }, + "WatcherExecuteRequestExample3": { + "summary": "Run a watch inline", + "description": "Run `POST _watcher/watch/_execute` to run a watch inline. All other settings for this API still apply when inlining a watch. 
In this example, while the inline watch defines a compare condition, during the execution this condition will be ignored.\n", "value": "{\n  \"watch\" : {\n    \"trigger\" : { \"schedule\" : { \"interval\" : \"10s\" } },\n    \"input\" : {\n      \"search\" : {\n        \"request\" : {\n          \"indices\" : [ \"logs\" ],\n          \"body\" : {\n            \"query\" : {\n              \"match\" : { \"message\": \"error\" }\n            }\n          }\n        }\n      }\n    },\n    \"condition\" : {\n      \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n    },\n    \"actions\" : {\n      \"log_error\" : {\n        \"logging\" : {\n          \"text\" : \"Found {{ctx.payload.hits.total}} errors in the logs\"\n        }\n      }\n    }\n  }\n}" } } } @@ -121423,10 +122318,12 @@ "properties": { "from": { "description": "The offset from the first result to fetch.\nIt must be non-negative.", + "default": 0.0, "type": "number" }, "size": { "description": "The number of hits to return.\nIt must be non-negative.", + "default": 10.0, "type": "number" }, "query": { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 7a5c026624..26c1ed3fb3 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -5718,6 +5718,7 @@ }, "include_ccs_metadata": { "description": "When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`\nobject with information about the clusters that participated in the search along with info such as shards\ncount.", + "default": false, "type": "boolean" } }, @@ -8131,6 +8132,7 @@ }, "enabled": { "description": "If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle\nthat's disabled (enabled: `false`) will have no effect on the data stream.", + "default": true, "type": "boolean" } } @@ -11787,6 +11789,7 @@ }, "deprecated": { "description": "Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.", + "default": false, "type": "boolean" } } @@ -12489,10 +12492,12 @@ "properties": { "allow_no_match": { "description": "Refer to the description for the `allow_no_match` query parameter.", + "default": true, "type": "boolean" }, "force": { "description": "Refer to the description for the `force` query parameter.", + "default": false, "type": "boolean" }, "timeout": { @@ -12953,6 +12958,7 @@ "properties": { "allow_lazy_start": { "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node. If\nset to `false` and a machine learning node with capacity to run the job\ncannot be immediately found, the API returns an error. If set to `true`,\nthe API does not return an error; the job waits in the `starting` state\nuntil sufficient machine learning node capacity is available. This\nbehavior is also affected by the cluster-wide\n`xpack.ml.max_lazy_ml_nodes` setting.", + "default": false, "type": "boolean" }, "analysis": { @@ -12970,6 +12976,7 @@ }, "max_num_threads": { "description": "The maximum number of threads to be used by the analysis. Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU.
Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "default": 1.0, "type": "number" }, "_meta": { @@ -12977,6 +12984,7 @@ }, "model_memory_limit": { "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\nto create data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "default": "1gb", "type": "string" }, "source": { @@ -13274,6 +13282,7 @@ }, "scroll_size": { "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\nThe maximum value is the value of `index.max_result_window`, which is 10,000 by default.", + "default": 1000.0, "type": "number" }, "headers": { @@ -13670,6 +13679,7 @@ "properties": { "allow_lazy_open": { "description": "Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.", + "default": false, "type": "boolean" }, "analysis_config": { @@ -13686,6 +13696,7 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`.", + "default": 1.0, "type": "number" }, "data_description": { @@ -13713,6 +13724,7 @@ }, "model_snapshot_retention_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted.", + "default": 10.0, "type": "number" }, "renormalization_window_days": { @@ -16260,10 +16272,12 @@ "properties": { "allow_no_match": { "description": "Refer to the description for the `allow_no_match` query parameter.", + "default": true, "type": "boolean" }, "force": { "description": "Refer to the description for the `force` query parameter.", + "default": false, "type": "boolean" }, "timeout": { @@ -16402,14 +16416,17 @@ }, "model_memory_limit": { "description": "The approximate maximum amount of memory resources that are permitted for\nanalytical processing. If your `elasticsearch.yml` file contains an\n`xpack.ml.max_model_memory_limit` setting, an error occurs when you try\nto create data frame analytics jobs that have `model_memory_limit` values\ngreater than that setting.", + "default": "1gb", "type": "string" }, "max_num_threads": { "description": "The maximum number of threads to be used by the analysis. 
Using more\nthreads may decrease the time necessary to complete the analysis at the\ncost of using more CPU. Note that the process may use additional threads\nfor operational functionality other than the analysis itself.", + "default": 1.0, "type": "number" }, "allow_lazy_start": { "description": "Specifies whether this job can start when there is insufficient machine\nlearning node capacity for it to be immediately assigned to a node.", + "default": false, "type": "boolean" } } @@ -16607,6 +16624,7 @@ }, "scroll_size": { "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.\nThe maximum value is the value of `index.max_result_window`.", + "default": 1000.0, "type": "number" } } @@ -16823,6 +16841,7 @@ "properties": { "allow_lazy_open": { "description": "Advanced configuration option. Specifies whether this job can open when\nthere is insufficient machine learning node capacity for it to be\nimmediately assigned to a node. If `false` and a machine learning node\nwith capacity to run the job cannot immediately be found, the open\nanomaly detection jobs API returns an error. However, this is also\nsubject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this\noption is set to `true`, the open anomaly detection jobs API does not\nreturn an error and the job waits in the opening state until sufficient\nmachine learning node capacity is available.", + "default": false, "type": "boolean" }, "analysis_limits": { @@ -16856,10 +16875,12 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old\nmodel snapshots for this job. It specifies a period of time (in days)\nafter which only the first snapshot per day is retained. This period is\nrelative to the timestamp of the most recent snapshot for this job. Valid\nvalues range from 0 to `model_snapshot_retention_days`. For jobs created\nbefore version 7.8.0, the default value matches\n`model_snapshot_retention_days`.", + "default": 1.0, "type": "number" }, "model_snapshot_retention_days": { "description": "Advanced configuration option, which affects the automatic removal of old\nmodel snapshots for this job. It specifies the maximum period of time (in\ndays) that snapshots are retained. This period is relative to the\ntimestamp of the most recent snapshot for this job.", + "default": 10.0, "type": "number" }, "renormalization_window_days": { @@ -17038,6 +17059,7 @@ "properties": { "number_of_allocations": { "description": "The number of model allocations on each node where the model is deployed.\nAll allocations on a node share the same copy of the model in memory but use\na separate set of threads to evaluate the model.\nIncreasing this value generally increases the throughput.\nIf this setting is greater than the number of hardware threads\nit will automatically be changed to a value less than the number of hardware threads.\nIf adaptive_allocations is enabled, do not set this value, because it’s automatically set.", + "default": 1.0, "type": "number" }, "adaptive_allocations": { @@ -18728,6 +18750,66 @@ "summary": "Reindex multiple sources", "description": "Run `POST _reindex` to reindex from multiple sources. The `index` attribute in source can be a list, which enables you to copy from lots of sources in one request. 
This example copies documents from the `my-index-000001` and `my-index-000002` indices.\n", "value": "{\n \"source\": {\n \"index\": [\"my-index-000001\", \"my-index-000002\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000002\"\n }\n}" + }, + "ReindexRequestExample10": { + "summary": "Reindex with Painless", + "description": "You can use Painless to reindex daily indices to apply a new template to the existing documents. The script extracts the date from the index name and creates a new index with `-1` appended. For example, all data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.\n", + "value": "{\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n}" + }, + "ReindexRequestExample11": { + "summary": "Reindex a random subset", + "description": "Run `POST _reindex` to extract a random subset of the source for testing. You might need to adjust the `min_score` value depending on the relative amount of data extracted from source.\n", + "value": "{\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\" : {\n \"random_score\" : {},\n \"min_score\" : 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample12": { + "summary": "Reindex modified documents", + "description": "Run `POST _reindex` to modify documents during reindexing. This example bumps the version of the source document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n}" + }, + "ReindexRequestExample13": { + "summary": "Reindex from remote on Elastic Cloud", + "description": "When using Elastic Cloud, you can run `POST _reindex` and authenticate against a remote cluster with an API key.\n", + "value": "{\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample2": { + "summary": "Manual slicing", + "description": "Run `POST _reindex` to slice a reindex request manually. Provide a slice ID and total number of slices to each request.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample3": { + "summary": "Automatic slicing", + "description": "Run `POST _reindex?slices=5&refresh` to automatically parallelize using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample4": { + "summary": "Routing", + "description": "By default if reindex sees a document with routing then the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this behavior. 
In this example, run `POST _reindex` to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`.\n", "value": "{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}" + }, + "ReindexRequestExample5": { + "summary": "Ingest pipelines", + "description": "Run `POST _reindex` and use the ingest pipelines feature.", + "value": "{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}" + }, + "ReindexRequestExample6": { + "summary": "Reindex with a query", + "description": "Run `POST _reindex` and add a query to the `source` to limit the documents to reindex. For example, this request copies documents into `my-new-index-000001` only if they have a `user.id` of `kimchy`.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample7": { + "summary": "Reindex with max_docs", + "description": "You can limit the number of processed documents by setting `max_docs`. For example, run `POST _reindex` to copy a single document from `my-index-000001` to `my-new-index-000001`.\n", + "value": "{\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample8": { + "summary": "Reindex selected fields", + "description": "You can use source filtering to reindex a subset of the fields in the original documents. For example, run `POST _reindex` to reindex only the `user.id` and `_doc` fields of each document.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\"user.id\", \"_doc\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample9": { + "summary": "Reindex new field names", + "description": "A reindex operation can build a copy of an index with renamed fields. 
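ReindexRequestExample2 and ReindexRequestExample3 above cover manual and automatic slicing. As a sketch of the manual variant, each slice gets its own request body with a distinct `id` and a shared `max`; the index names reuse the example placeholders, and the loop only builds the bodies rather than sending them:

```python
import json

# Manual slicing fan-out (see ReindexRequestExample2): one body per slice ID.
max_slices = 2
for slice_id in range(max_slices):
    body = {
        "source": {
            "index": "my-index-000001",
            "slice": {"id": slice_id, "max": max_slices},
        },
        "dest": {"index": "my-new-index-000001"},
    }
    print(json.dumps(body))
```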
If your index has documents with `text` and `flag` fields, you can change the latter field name to `tag` during the reindex.\n", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}" } } } @@ -20634,6 +20716,7 @@ }, "owner": { "description": "Query API keys owned by the currently authenticated user.\nThe `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.\n\nNOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`.", + "default": false, "type": "boolean" }, "realm_name": { @@ -23339,7 +23422,11 @@ "document" ], "summary": "Update a document", - "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\n\n## Required authorization\n\n* Index privileges: `write`\n", + "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\nFor usage examples such as partial updates, upserts, and scripted updates, see the External documentation.\n\n## Required authorization\n\n* Index privileges: `write`\n", + "externalDocs": { + "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-document", + "x-previousVersionUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update.html" + }, "operationId": "update", "parameters": [ { @@ -23503,6 +23590,7 @@ "properties": { "detect_noop": { "description": "If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.", + "default": 
true, "type": "boolean" }, "doc": { @@ -23511,6 +23599,7 @@ }, "doc_as_upsert": { "description": "If `true`, use the contents of 'doc' as the value of 'upsert'.\nNOTE: Using ingest pipelines with `doc_as_upsert` is not supported.", + "default": false, "type": "boolean" }, "script": { @@ -23518,6 +23607,7 @@ }, "scripted_upsert": { "description": "If `true`, run the script whether or not the document exists.", + "default": false, "type": "boolean" }, "_source": { @@ -23619,7 +23709,11 @@ "document" ], "summary": "Update documents", - "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update 
process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.\n\n## Required authorization\n\n* Index privileges: `read`,`write`\n", + "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Refreshing shards**\n\nSpecifying the `refresh` parameter refreshes all shards once the request completes.\nThis is different to the update API's `refresh` parameter, which causes only the shard\nthat received the request to be refreshed. 
Unlike the update API, it does not support\n`wait_for`.\n\n**Running update by query asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch\nperforms some preflight checks, launches the request, and returns a\n[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.\n\n**Waiting for active shards**\n\n`wait_for_active_shards` controls how many copies of a shard must be active\nbefore proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)\nfor details. `timeout` controls how long each write request waits for unavailable\nshards to become available. Both work exactly the way they work in the\n[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also\nspecify the `scroll` parameter to control how long it keeps the search context\nalive, for example `?scroll=10m`. The default is 5 minutes.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. 
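The throttling formula quoted above is easy to check numerically. A sketch assuming the default batch size of 1000; clamping negative padding to zero is an assumption for readability, not something the description states:

```python
def throttle_wait_time(requests_per_second: float, write_time: float,
                       batch_size: float = 1000.0) -> float:
    """Padding between update-by-query batches: target_time - write_time."""
    target_time = batch_size / requests_per_second
    return max(target_time - write_time, 0.0)  # clamp at zero is an assumption

# Worked example from the description: 1000 / 500 = 2s target, minus 0.5s of
# write time, leaves 1.5s of padding per batch.
assert throttle_wait_time(500, 0.5) == 1.5
```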
Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\nRefer to the linked documentation for examples of how to update documents using the `_update_by_query` API:\n\n## Required authorization\n\n* Index privileges: `read`,`write`\n", + "externalDocs": { + "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-by-query-api", + "x-previousVersionUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update-by-query.html" + }, "operationId": "update-by-query", "parameters": [ { @@ -30016,6 +30110,7 @@ "properties": { "boost": { "description": "Floating point number used to decrease or increase the relevance scores of the query.\nBoost values are relative to the default value of 1.0.\nA boost value between 0 and 1.0 decreases the relevance score.\nA value greater than 1.0 increases the relevance score.", + "default": 1.0, "type": "number" }, "_name": { @@ -30104,6 +30199,7 @@ }, "auto_generate_synonyms_phrase_query": { "description": "If true, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "operator": { @@ -30172,6 +30268,7 @@ }, "tie_breaker": { "description": "Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses.", + "default": 0.0, "type": "number" } }, @@ -30523,6 +30620,7 @@ }, "factor": { "description": "Optional factor to multiply the field value with.", + "default": 1.0, "type": "number" }, "missing": { @@ -30636,6 +30734,7 @@ }, "explain": { "description": "If `true`, the request returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -30647,6 +30746,7 @@ }, "from": { "description": "The starting document offset, which must be non-negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -30707,6 +30807,7 @@ }, "profile": { "description": "Set to `true` to return detailed timing information about the execution of individual components in a search request.\nNOTE: This is a debugging tool and adds significant overhead to search execution.", + "default": false, "type": "boolean" }, "query": { @@ -30741,6 +30842,7 @@ }, "size": { "description": "The number of hits to return, which 
must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` property.", + "default": 10.0, "type": "number" }, "slice": { @@ -30764,6 +30866,7 @@ }, "terminate_after": { "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this property to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this property for requests that target data streams with backing indices across multiple data tiers.\n\nIf set to `0` (default), the query does not terminate early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -30772,10 +30875,12 @@ }, "track_scores": { "description": "If `true`, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If `true`, the request returns the document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -30843,10 +30948,12 @@ }, "size": { "description": "The maximum number of hits to return per `inner_hits`.", + "default": 3.0, "type": "number" }, "from": { "description": "Inner hit starting document offset.", + "default": 0.0, "type": "number" }, "collapse": { @@ -30892,6 +30999,7 @@ "$ref": "#/components/schemas/_types.Fields" }, "track_scores": { + "default": false, "type": "boolean" }, "version": { @@ -30978,10 +31086,12 @@ }, "boundary_chars": { "description": "A string that contains each boundary character.", + "default": ".,!? \\t\\n", "type": "string" }, "boundary_max_scan": { "description": "How far to scan for boundary characters.", + "default": 20.0, "type": "number" }, "boundary_scanner": { @@ -30989,6 +31099,7 @@ }, "boundary_scanner_locale": { "description": "Controls which locale is used to search for sentence and word boundaries.\nThis parameter takes a form of a language tag, for example: `\"en-US\"`, `\"fr-FR\"`, `\"ja-JP\"`.", + "default": "Locale.ROOT", "type": "string" }, "force_source": { @@ -31000,6 +31111,7 @@ }, "fragment_size": { "description": "The size of the highlighted fragment in characters.", + "default": 100.0, "type": "number" }, "highlight_filter": { @@ -31017,10 +31129,12 @@ }, "no_match_size": { "description": "The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight.", + "default": 0.0, "type": "number" }, "number_of_fragments": { "description": "The maximum number of fragments to return.\nIf the number of fragments is set to `0`, no fragments are returned.\nInstead, the entire field contents are highlighted and returned.\nThis can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required.\nIf `number_of_fragments` is `0`, `fragment_size` is ignored.", + "default": 5.0, "type": "number" }, "options": { @@ -31034,6 +31148,7 @@ }, "phrase_limit": { "description": "Controls the number of matching phrases in a document that are considered.\nPrevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory.\nWhen using `matched_fields`, `phrase_limit` phrases per matched field are considered. 
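For context on the highlighter hunk above, a search body that spells out the recorded defaults (`fragment_size: 100`, `number_of_fragments: 5`, `no_match_size: 0`); omitting them should behave identically. The index and field names are invented:

```python
import json

# Illustrative search body: highlighter settings pinned to the defaults above.
search_body = {
    "query": {"match": {"body": "example"}},
    "highlight": {
        "fields": {
            "body": {
                "fragment_size": 100,      # characters per fragment (default)
                "number_of_fragments": 5,  # fragments returned (default)
                "no_match_size": 0,        # text returned when nothing matches (default)
            }
        }
    },
}
print(json.dumps(search_body, indent=2))
```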
Raising the limit increases query time and consumes more memory.\nOnly supported by the `fvh` highlighter.", + "default": 256.0, "type": "number" }, "post_tags": { @@ -31052,6 +31167,7 @@ }, "require_field_match": { "description": "By default, only fields that contain a query match are highlighted.\nSet to `false` to highlight all fields.", + "default": true, "type": "boolean" }, "tags_schema": { @@ -31428,10 +31544,12 @@ }, "query_weight": { "description": "Relative importance of the original query versus the rescore query.", + "default": 1.0, "type": "number" }, "rescore_query_weight": { "description": "Relative importance of the rescore query versus the original query.", + "default": 1.0, "type": "number" }, "score_mode": { @@ -31999,10 +32117,12 @@ "properties": { "max_expansions": { "description": "Maximum number of variations created.", + "default": 50.0, "type": "number" }, "prefix_length": { "description": "Number of beginning characters left unchanged when creating expansions.", + "default": 0.0, "type": "number" }, "rewrite": { @@ -32010,6 +32130,7 @@ }, "transpositions": { "description": "Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`).", + "default": true, "type": "boolean" }, "fuzziness": { @@ -32065,6 +32186,7 @@ }, "ignore_unmapped": { "description": "Set to `true` to ignore an unmapped field and not match any documents for this query.\nSet to `false` to throw an exception if the field is not mapped.", + "default": false, "type": "boolean" } } @@ -32105,6 +32227,7 @@ }, "ignore_unmapped": { "description": "Set to `true` to ignore an unmapped field and not match any documents for this query.\nSet to `false` to throw an exception if the field is not mapped.", + "default": false, "type": "boolean" } }, @@ -32165,6 +32288,7 @@ "properties": { "ignore_unmapped": { "description": "Set to `true` to ignore an unmapped field and not match any documents for this query.\nSet to `false` to throw an exception if the field is not mapped.", + "default": false, "type": "boolean" } } @@ -32181,6 +32305,7 @@ "properties": { "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped `type` and not return any documents instead of an error.", + "default": false, "type": "boolean" }, "inner_hits": { @@ -32234,6 +32359,7 @@ "properties": { "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error.\nYou can use this parameter to query multiple indices that may not contain the `parent_type`.", + "default": false, "type": "boolean" }, "inner_hits": { @@ -32247,6 +32373,7 @@ }, "score": { "description": "Indicates whether the relevance score of a matching parent document is aggregated into its child documents.", + "default": false, "type": "boolean" } }, @@ -32338,10 +32465,12 @@ }, "max_gaps": { "description": "Maximum number of positions between the matching terms.\nIntervals produced by the rules further apart than this are not considered matches.", + "default": -1.0, "type": "number" }, "ordered": { "description": "If `true`, intervals produced by the rules should appear in the order in which they are specified.", + "default": false, "type": "boolean" }, "filter": { @@ -32447,6 +32576,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged when creating expansions.", + "default": 0.0, "type": "number" }, "term": { @@ -32455,6 +32585,7 @@ }, "transpositions": { "description": "Indicates whether edits include 
transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "use_field": { @@ -32474,10 +32605,12 @@ }, "max_gaps": { "description": "Maximum number of positions between the matching terms.\nTerms further apart than this are not considered matches.", + "default": -1.0, "type": "number" }, "ordered": { "description": "If `true`, matching terms must appear in their specified order.", + "default": false, "type": "boolean" }, "query": { @@ -32647,6 +32780,7 @@ }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "cutoff_frequency": { @@ -32661,14 +32795,17 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "lenient": { "description": "If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "max_expansions": { "description": "Maximum number of terms to which the query will expand.", + "default": 50.0, "type": "number" }, "minimum_should_match": { @@ -32679,6 +32816,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "query": { @@ -32742,10 +32880,12 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": true, "type": "boolean" }, "max_expansions": { "description": "Maximum number of terms to which the query will expand.\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": 50.0, "type": "number" }, "minimum_should_match": { @@ -32756,6 +32896,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": 0.0, "type": "number" }, "query": { @@ -32797,6 +32938,7 @@ }, "slop": { "description": "Maximum number of positions allowed between matching tokens.", + "default": 0.0, "type": "number" }, "zero_terms_query": { @@ -32823,6 +32965,7 @@ }, "max_expansions": { "description": "Maximum number of terms to which the last provided term of the query value will expand.", + "default": 50.0, "type": "number" }, "query": { @@ -32831,6 +32974,7 @@ }, "slop": { "description": "Maximum number of positions allowed between matching tokens.", + "default": 0.0, "type": "number" }, "zero_terms_query": { @@ -32860,10 +33004,12 @@ }, "boost_terms": { "description": "Each term in the formed query could be further boosted by their tf-idf score.\nThis sets the boost factor to use when using this feature.\nDefaults to deactivated (0).", + "default": 0.0, "type": "number" }, "fail_on_unsupported_field": { "description": "Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`).", + "default": true, "type": "boolean" }, "fields": { @@ -32875,6 +33021,7 @@ }, "include": { "description": "Specifies whether the input documents should also be included in the search results returned.", + "default": false, "type": "boolean" }, "like": 
{ @@ -32897,14 +33044,17 @@ }, "max_query_terms": { "description": "The maximum number of query terms that can be selected.", + "default": 25.0, "type": "number" }, "max_word_length": { "description": "The maximum word length above which the terms are ignored.\nDefaults to unbounded (`0`).", + "default": 0.0, "type": "number" }, "min_doc_freq": { "description": "The minimum document frequency below which the terms are ignored from the input document.", + "default": 5.0, "type": "number" }, "minimum_should_match": { @@ -32912,10 +33062,12 @@ }, "min_term_freq": { "description": "The minimum term frequency below which the terms are ignored from the input document.", + "default": 2.0, "type": "number" }, "min_word_length": { "description": "The minimum word length below which the terms are ignored.", + "default": 0.0, "type": "number" }, "routing": { @@ -33078,6 +33230,7 @@ }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, match phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "cutoff_frequency": { @@ -33095,14 +33248,17 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).\nCan be applied to the term subqueries constructed for all terms but the final term.", + "default": true, "type": "boolean" }, "lenient": { "description": "If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "max_expansions": { "description": "Maximum number of terms to which the query will expand.", + "default": 50.0, "type": "number" }, "minimum_should_match": { @@ -33113,6 +33269,7 @@ }, "prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "query": { @@ -33121,10 +33278,12 @@ }, "slop": { "description": "Maximum number of positions allowed between matching tokens.", + "default": 0.0, "type": "number" }, "tie_breaker": { "description": "Determines how scores for each per-term blended query and scores across groups are combined.", + "default": 0.0, "type": "number" }, "type": { @@ -33161,6 +33320,7 @@ "properties": { "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped path and not return any documents instead of an error.", + "default": false, "type": "boolean" }, "inner_hits": { @@ -33196,6 +33356,7 @@ }, "ignore_unmapped": { "description": "Indicates whether to ignore an unmapped `type` and not return any documents instead of an error.", + "default": false, "type": "boolean" }, "type": { @@ -33331,6 +33492,7 @@ }, "case_insensitive": { "description": "Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`.\nDefault is `false` which means the case sensitivity of matching depends on the underlying field’s mapping.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -33351,6 +33513,7 @@ "properties": { "allow_leading_wildcard": { "description": "If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string.", + "default": true, "type": "boolean" }, "analyzer": { @@ -33359,10 +33522,12 @@ }, "analyze_wildcard": { "description": "If `true`, the query attempts to analyze wildcard terms in the query string.", + "default": false, "type": "boolean" }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, match 
phrase queries are automatically created for multi-term synonyms.", + "default": true, "type": "boolean" }, "default_field": { @@ -33373,9 +33538,11 @@ }, "enable_position_increments": { "description": "If `true`, enable position increments in queries constructed from a `query_string` search.", + "default": true, "type": "boolean" }, "escape": { + "default": false, "type": "boolean" }, "fields": { @@ -33390,10 +33557,12 @@ }, "fuzzy_max_expansions": { "description": "Maximum number of terms to which the query expands for fuzzy matching.", + "default": 50.0, "type": "number" }, "fuzzy_prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "fuzzy_rewrite": { @@ -33401,14 +33570,17 @@ }, "fuzzy_transpositions": { "description": "If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`).", + "default": true, "type": "boolean" }, "lenient": { "description": "If `true`, format-based errors, such as providing a text value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "max_determinized_states": { "description": "Maximum number of automaton states required for the query.", + "default": 10000.0, "type": "number" }, "minimum_should_match": { @@ -33416,6 +33588,7 @@ }, "phrase_slop": { "description": "Maximum number of positions allowed between matching tokens for phrases.", + "default": 0.0, "type": "number" }, "query": { @@ -33771,6 +33944,7 @@ "properties": { "case_insensitive": { "description": "Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`.\nWhen `false`, case sensitivity of matching depends on the underlying field’s mapping.", + "default": false, "x-state": "Generally available", "type": "boolean" }, @@ -33780,6 +33954,7 @@ }, "max_determinized_states": { "description": "Maximum number of automaton states required for the query.", + "default": 10000.0, "type": "number" }, "rewrite": { @@ -33932,10 +34107,12 @@ }, "analyze_wildcard": { "description": "If `true`, the query attempts to analyze wildcard terms in the query string.", + "default": false, "type": "boolean" }, "auto_generate_synonyms_phrase_query": { "description": "If `true`, the parser creates a match_phrase query for each multi-position token.", + "default": true, "type": "boolean" }, "default_operator": { @@ -33953,10 +34130,12 @@ }, "fuzzy_max_expansions": { "description": "Maximum number of terms to which the query expands for fuzzy matching.", + "default": 50.0, "type": "number" }, "fuzzy_prefix_length": { "description": "Number of beginning characters left unchanged for fuzzy matching.", + "default": 0.0, "type": "number" }, "fuzzy_transpositions": { @@ -33965,6 +34144,7 @@ }, "lenient": { "description": "If `true`, format-based errors, such as providing a text value for a numeric field, are ignored.", + "default": false, "type": "boolean" }, "minimum_should_match": { @@ -34209,10 +34389,12 @@ }, "post": { "description": "The number of tokens after the include span that can’t have overlap with the exclude span.", + "default": 0.0, "type": "number" }, "pre": { "description": "The number of tokens before the include span that can’t have overlap with the exclude span.", + "default": 0.0, "type": "number" } }, @@ -34344,14 +34526,17 @@ "properties": { "tokens_freq_ratio_threshold": { "description": "Tokens whose frequency is more than this threshold times the average frequency of all tokens in the 
specified field are considered outliers and pruned.", + "default": 5.0, "type": "number" }, "tokens_weight_threshold": { "description": "Tokens whose weight is less than this threshold are considered nonsignificant and pruned.", + "default": 0.4, "type": "number" }, "only_score_pruned_tokens": { "description": "Whether to only score pruned tokens, vs only scoring kept tokens.", + "default": false, "type": "boolean" } } @@ -34369,6 +34554,7 @@ }, "case_insensitive": { "description": "Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`.\nWhen `false`, the case sensitivity of matching depends on the underlying field’s mapping.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -34561,6 +34747,7 @@ "properties": { "buckets": { "description": "The target number of buckets.", + "default": 10.0, "type": "number" }, "field": { @@ -34915,6 +35102,7 @@ "properties": { "precision_threshold": { "description": "A unique count below which counts are expected to be close to accurate.\nThis allows you to trade memory for accuracy.", + "default": 3000.0, "type": "number" }, "rehash": { @@ -34951,14 +35139,17 @@ }, "max_unique_tokens": { "description": "The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1.\nSmaller values use less memory and create fewer categories. Larger values will use more memory and\ncreate narrower categories. Max allowed value is 100.", + "default": 50.0, "type": "number" }, "max_matched_tokens": { "description": "The maximum number of token positions to match on before attempting to merge categories. Larger\nvalues will use more memory and create narrower categories. Max allowed value is 100.", + "default": 5.0, "type": "number" }, "similarity_threshold": { "description": "The minimum percentage of tokens that must match for text to be added to the category bucket. Must\nbe between 1 and 100. The larger the value the narrower the categories. 
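The `categorize_text` hunk above records `similarity_threshold: 50` and `max_matched_tokens: 5` as defaults. A sketch of an aggregation body relying on them; the top-level `size: 0` simply suppresses hits, and `message` is a placeholder field:

```python
import json

# Sketch of a categorize_text aggregation; similarity_threshold is pinned to the
# recorded default of 50 (higher values produce narrower categories).
agg_body = {
    "size": 0,
    "aggs": {
        "log_categories": {
            "categorize_text": {
                "field": "message",
                "similarity_threshold": 50,
            }
        }
    },
}
print(json.dumps(agg_body, indent=2))
```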
Larger values will increase memory\nusage and create narrower categories.", + "default": 50.0, "type": "number" }, "categorization_filters": { @@ -34977,6 +35168,7 @@ }, "size": { "description": "The number of buckets to return.", + "default": 10.0, "type": "number" }, "min_doc_count": { @@ -35052,6 +35244,7 @@ }, "size": { "description": "The number of composite buckets that should be returned.", + "default": 10.0, "type": "number" }, "sources": { @@ -35427,6 +35620,7 @@ }, "max_docs_per_value": { "description": "Limits how many documents are permitted per choice of de-duplicating value.", + "default": 1.0, "type": "number" }, "script": { @@ -35434,6 +35628,7 @@ }, "shard_size": { "description": "Limits how many top-scoring documents are collected in the sample processed on each shard.", + "default": 100.0, "type": "number" }, "field": { @@ -35495,14 +35690,17 @@ }, "minimum_set_size": { "description": "The minimum size of one item set.", + "default": 1.0, "type": "number" }, "minimum_support": { "description": "The minimum support of one item set.", + "default": 0.1, "type": "number" }, "size": { "description": "The number of top item sets to return.", + "default": 10.0, "type": "number" }, "filter": { @@ -35593,10 +35791,12 @@ }, "other_bucket_key": { "description": "The key with which the other bucket is returned.", + "default": "_other_", "type": "string" }, "keyed": { "description": "By default, the named filters aggregation returns the buckets as an object.\nSet to `false` to return the buckets as an array of objects.", + "default": true, "type": "boolean" } } @@ -35630,6 +35830,7 @@ "properties": { "wrap_longitude": { "description": "Specifies whether the bounding box should be allowed to overlap the international date line.", + "default": true, "type": "boolean" } } @@ -35741,6 +35942,7 @@ }, "size": { "description": "The maximum number of geohash buckets to return.", + "default": 10000.0, "type": "number" } } @@ -35776,6 +35978,7 @@ }, "size": { "description": "The maximum length of the line represented in the aggregation.\nValid sizes are between 1 and 10000.", + "default": 10000.0, "type": "number" } }, @@ -35826,6 +36029,7 @@ }, "size": { "description": "The maximum number of buckets to return.", + "default": 10000.0, "type": "number" }, "bounds": { @@ -35851,6 +36055,7 @@ }, "precision": { "description": "Integer zoom of the key used to define cells or buckets\nin the results. 
Value should be between 0 and 15.", + "default": 6.0, "type": "number" }, "bounds": { @@ -35858,6 +36063,7 @@ }, "size": { "description": "Maximum number of buckets to return.", + "default": 10000.0, "type": "number" }, "shard_size": { @@ -35925,6 +36131,7 @@ }, "keyed": { "description": "If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys.", + "default": false, "type": "boolean" } } @@ -36016,10 +36223,12 @@ }, "is_ipv6": { "description": "Defines whether the prefix applies to IPv6 addresses.", + "default": false, "type": "boolean" }, "append_prefix_length": { "description": "Defines whether the prefix length is appended to IP address keys in the response.", + "default": false, "type": "boolean" }, "keyed": { @@ -36028,6 +36237,7 @@ }, "min_doc_count": { "description": "Minimum number of documents in a bucket for it to be included in the response.", + "default": 1.0, "type": "number" } }, @@ -36080,6 +36290,7 @@ }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" } } @@ -36093,6 +36304,7 @@ }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" }, "prediction_field_type": { @@ -36176,6 +36388,7 @@ "properties": { "compression": { "description": "Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error.", + "default": 1000.0, "type": "number" } } @@ -36455,6 +36668,7 @@ }, "shift": { "description": "By default, the window consists of the last n values excluding the current bucket.\nIncreasing `shift` by 1 moves the starting window position by 1 to the right.", + "default": 0.0, "type": "number" }, "keyed": { @@ -36478,6 +36692,7 @@ }, "shift": { "description": "By default, the window consists of the last n values excluding the current bucket.\nIncreasing `shift` by 1 moves the starting window position by 1 to the right.", + "default": 0.0, "type": "number" }, "window": { @@ -36504,10 +36719,12 @@ }, "min_doc_count": { "description": "The minimum number of documents in a bucket for it to be returned.", + "default": 1.0, "type": "number" }, "shard_min_doc_count": { "description": "The minimum number of documents in a bucket on each shard for it to be returned.", + "default": 1.0, "type": "number" }, "shard_size": { @@ -36516,10 +36733,12 @@ }, "show_term_doc_count_error": { "description": "Calculates the doc count error on a per-term basis.", + "default": false, "type": "boolean" }, "size": { "description": "The number of term buckets that should be returned out of the overall terms list.", + "default": 10.0, "type": "number" }, "terms": { @@ -36623,6 +36842,7 @@ "properties": { "keyed": { "description": "By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array.\nSet to `false` to disable this behavior.", + "default": true, "type": "boolean" }, "values": { @@ -36678,6 +36898,7 @@ "properties": { "keyed": { "description": "By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array.\nSet to `false` to disable this behavior.", + "default": true, "type": "boolean" }, "percents": { @@ -36771,6 +36992,7 @@ }, "max_doc_count": { "description": "The maximum number of documents a term should appear in.", + "default": 
1.0, "type": "number" }, "missing": { @@ -36778,6 +37000,7 @@ }, "precision": { "description": "The precision of the internal CuckooFilters.\nSmaller precision leads to better approximation, but higher memory usage.", + "default": 0.001, "type": "number" }, "value_type": { @@ -36837,6 +37060,7 @@ "properties": { "shard_size": { "description": "Limits how many top-scoring documents are collected in the sample processed on each shard.", + "default": 100.0, "type": "number" } } @@ -36924,6 +37148,7 @@ }, "min_doc_count": { "description": "Only return terms that are found in more than `min_doc_count` hits.", + "default": 3.0, "type": "number" }, "mutual_information": { @@ -37051,6 +37276,7 @@ }, "min_doc_count": { "description": "Only return values that are found in more than `min_doc_count` hits.", + "default": 3.0, "type": "number" }, "mutual_information": { @@ -37111,6 +37337,7 @@ "properties": { "show_distribution": { "description": "Shows the probability distribution for all characters.", + "default": false, "type": "boolean" } } @@ -37162,6 +37389,7 @@ }, "min_doc_count": { "description": "Only return values that are found in more than `min_doc_count` hits.", + "default": 1.0, "type": "number" }, "missing": { @@ -37197,6 +37425,7 @@ }, "size": { "description": "The number of buckets returned out of the overall terms list.", + "default": 10.0, "type": "number" }, "format": { @@ -37216,6 +37445,7 @@ "properties": { "size": { "description": "The maximum number of results to return.", + "default": 10000.0, "type": "number" }, "keyed": { @@ -37243,6 +37473,7 @@ }, "explain": { "description": "If `true`, returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "fields": { @@ -37254,6 +37485,7 @@ }, "from": { "description": "Starting document offset.", + "default": 0.0, "type": "number" }, "highlight": { @@ -37268,6 +37500,7 @@ }, "size": { "description": "The maximum number of top matching hits to return per bucket.", + "default": 3.0, "type": "number" }, "sort": { @@ -37281,10 +37514,12 @@ }, "track_scores": { "description": "If `true`, calculates and returns document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If `true`, returns document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -37365,6 +37600,7 @@ }, "size": { "description": "The number of top documents from which to return metrics.", + "default": 1.0, "type": "number" }, "sort": { @@ -37458,6 +37694,7 @@ }, "buckets": { "description": "The target number of buckets.", + "default": 10.0, "type": "number" }, "shard_size": { @@ -37548,6 +37785,7 @@ }, "require_alias": { "description": "If `true`, the request's actions must target an index alias.", + "default": false, "type": "boolean" } } @@ -37600,6 +37838,7 @@ "properties": { "require_alias": { "description": "If `true`, the request's actions must target an index alias.", + "default": false, "type": "boolean" }, "retry_on_conflict": { @@ -37625,6 +37864,7 @@ "properties": { "detect_noop": { "description": "If true, the `result` in the response is set to 'noop' when no changes to the document occur.", + "default": true, "type": "boolean" }, "doc": { @@ -37633,6 +37873,7 @@ }, "doc_as_upsert": { "description": "Set to `true` to use the contents of `doc` as the value of `upsert`.", + "default": false, "type": "boolean" }, "script": { @@ -37640,6 +37881,7 @@ }, "scripted_upsert": { "description": "Set to 
`true` to run the script whether or not the document exists.", + "default": false, "type": "boolean" }, "_source": { @@ -39709,15 +39951,18 @@ "$ref": "#/components/schemas/indices._types.IndexCheckOnStartup" }, "codec": { + "default": "LZ4", "type": "string" }, "routing_partition_size": { "$ref": "#/components/schemas/_spec_utils.Stringifiedinteger" }, "load_fixed_bitset_filters_eagerly": { + "default": true, "type": "boolean" }, "hidden": { + "default": "false", "oneOf": [ { "type": "boolean" @@ -39728,6 +39973,7 @@ ] }, "auto_expand_replicas": { + "default": "false", "oneOf": [ { "type": "string" @@ -39747,24 +39993,31 @@ "$ref": "#/components/schemas/_types.Duration" }, "max_result_window": { + "default": 10000.0, "type": "number" }, "max_inner_result_window": { + "default": 100.0, "type": "number" }, "max_rescore_window": { + "default": 10000.0, "type": "number" }, "max_docvalue_fields_search": { + "default": 100.0, "type": "number" }, "max_script_fields": { + "default": 32.0, "type": "number" }, "max_ngram_diff": { + "default": 1.0, "type": "number" }, "max_shingle_diff": { + "default": 3.0, "type": "number" }, "blocks": { @@ -39780,9 +40033,11 @@ "$ref": "#/components/schemas/indices._types.SettingsHighlight" }, "max_terms_count": { + "default": 65536.0, "type": "number" }, "max_regex_length": { + "default": 1000.0, "type": "number" }, "routing": { @@ -39895,6 +40150,7 @@ "properties": { "enabled": { "description": "Indicates whether soft deletes are enabled on the index.", + "default": true, "type": "boolean" }, "retention_lease": { @@ -40135,6 +40391,7 @@ "type": "object", "properties": { "max_analyzed_offset": { + "default": 1000000.0, "type": "number" } } @@ -40244,6 +40501,7 @@ }, "origination_date": { "description": "If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting\nif you create a new index that contains old data and want to use the original creation date to calculate the index\nage. Specified as a Unix epoch value in milliseconds.", + "default": 0.0, "type": "number" }, "parse_origination_date": { @@ -40255,10 +40513,12 @@ }, "rollover_alias": { "description": "The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action.\nWhen the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more\ninformation about rolling indices, see Rollover.", + "default": "", "type": "string" }, "prefer_ilm": { "description": "Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are\napplicable for an index).", + "default": "true", "oneOf": [ { "type": "boolean" @@ -40604,6 +40864,7 @@ }, "max_output_size": { "description": "The maximum token size to emit. Tokens larger than this size will be discarded.\nDefaults to `255`", + "default": 255.0, "type": "number" }, "separator": { @@ -40694,10 +40955,12 @@ }, "lowercase": { "description": "Should terms be lowercased or not.\nDefaults to `true`.", + "default": true, "type": "boolean" }, "pattern": { "description": "A Java regular expression.\nDefaults to `\\W+`.", + "default": "\\W+", "type": "string" }, "stopwords": { @@ -40740,6 +41003,7 @@ }, "max_token_length": { "description": "The maximum token length. 
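The pattern analyzer hunk above records `\W+` and `lowercase: true` as defaults. A rough approximation of that tokenization, with Python's `re` standing in for Java's regex engine (the two differ in edge cases):

```python
import re

# Approximate the pattern analyzer defaults: split on \W+ and lowercase terms.
def pattern_analyze(text: str, pattern: str = r"\W+", lowercase: bool = True) -> list:
    tokens = [t for t in re.split(pattern, text) if t]
    return [t.lower() for t in tokens] if lowercase else tokens

print(pattern_analyze("The QUICK brown-fox!"))  # ['the', 'quick', 'brown', 'fox']
```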
If a token is seen that exceeds this length then it is split at `max_token_length` intervals.\nDefaults to `255`.", + "default": 255.0, "type": "number" }, "stopwords": { @@ -44804,6 +45068,7 @@ ] }, "buffer_size": { + "default": 256.0, "type": "number" } }, @@ -44879,6 +45144,7 @@ "type": "number" }, "token_chars": { + "default": [], "type": "array", "items": { "$ref": "#/components/schemas/_types.analysis.TokenChar" @@ -45525,6 +45791,7 @@ "properties": { "limit": { "description": "The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit.\nThe limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance\ndegradations and memory issues, especially in clusters with a high load or few resources.", + "default": "1000", "oneOf": [ { "type": "number" @@ -45536,6 +45803,7 @@ }, "ignore_dynamic_beyond_limit": { "description": "This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set\nto false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail\nwith the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail.\nInstead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false.\nThe fields that were not added to the mapping will be added to the _ignored field.", + "default": "false", "oneOf": [ { "type": "boolean" @@ -45552,6 +45820,7 @@ "properties": { "limit": { "description": "The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined\nat the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc.", + "default": 20.0, "type": "number" } } @@ -45561,6 +45830,7 @@ "properties": { "limit": { "description": "The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when\narrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this\nsetting limits the number of unique nested types per index.", + "default": 50.0, "type": "number" } } @@ -45570,6 +45840,7 @@ "properties": { "limit": { "description": "The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps\nto prevent out of memory errors when a document contains too many nested objects.", + "default": 10000.0, "type": "number" } } @@ -46961,6 +47232,7 @@ }, "index": { "description": "If `true`, you can search this field using the kNN search API.", + "default": true, "type": "boolean" }, "index_options": { @@ -46993,10 +47265,12 @@ }, "ef_construction": { "description": "The number of candidates to track while assembling the list of nearest neighbors for each new node.\n\nOnly applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types.", + "default": 100.0, "type": "number" }, "m": { "description": "The number of neighbors each node will be connected to in the HNSW graph.\n\nOnly applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types.", + "default": 16.0, "type": "number" }, "type": { @@ -47756,6 +48030,7 @@ }, "time_series_dimension": { "description": "For internal use by Elastic only. Marks the field as a time series dimension. 
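The `dense_vector` hunk documents `index` defaulting to `true` and the HNSW parameters `m` and `ef_construction` defaulting to `16` and `100`. Spelled out explicitly in a mapping, with hypothetical index and field names and an illustrative `dims` value:

```
# Hypothetical names; m and ef_construction match the documented HNSW defaults
PUT /my-vectors
{
  "mappings": {
    "properties": {
      "embedding": {
        "type": "dense_vector",
        "dims": 384,
        "index": true,
        "index_options": {
          "type": "hnsw",
          "m": 16,
          "ef_construction": 100
        }
      }
    }
  }
}
```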
Defaults to false.", + "default": false, "x-state": "Technical preview", "type": "boolean" } @@ -48296,6 +48571,7 @@ }, "is_write_index": { "description": "If `true`, the index is the write index for the alias.", + "default": false, "type": "boolean" }, "routing": { @@ -48308,6 +48584,7 @@ }, "is_hidden": { "description": "If `true`, the alias is hidden.\nAll indices for the alias must have the same `is_hidden` value.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -48376,6 +48653,7 @@ }, "enabled": { "description": "If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle\nthat's disabled (enabled: `false`) will have no effect on the data stream.", + "default": true, "type": "boolean" } } @@ -48747,10 +49025,12 @@ }, "is_hidden": { "description": "If `true`, the alias is hidden.\nAll indices for the alias must have the same `is_hidden` value.", + "default": false, "type": "boolean" }, "is_write_index": { "description": "If `true`, the index is the write index for the alias.", + "default": false, "type": "boolean" }, "routing": { @@ -50423,14 +50703,17 @@ }, "min_doc_count": { "description": "Specifies how many documents must contain a pair of terms before it is considered to be a useful connection.\nThis setting acts as a certainty threshold.", + "default": 3.0, "type": "number" }, "shard_min_doc_count": { "description": "Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration.", + "default": 2.0, "type": "number" }, "size": { "description": "Specifies the maximum number of vertex terms returned for each field.", + "default": 5.0, "type": "number" } }, @@ -50460,6 +50743,7 @@ }, "sample_size": { "description": "Each hop considers a sample of the best-matching documents on each shard.\nUsing samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms.\nVery small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms.\nVery large sample sizes can dilute the quality of the results and increase execution times.", + "default": 100.0, "type": "number" }, "timeout": { @@ -51073,10 +51357,12 @@ "properties": { "hidden": { "description": "If true, the data stream is hidden.", + "default": false, "type": "boolean" }, "allow_custom_routing": { "description": "If true, the data stream supports custom routing.", + "default": false, "type": "boolean" } } @@ -51367,6 +51653,7 @@ }, "is_hidden": { "description": "If `true`, the alias is hidden.", + "default": false, "type": "boolean" }, "is_write_index": { @@ -51381,6 +51668,7 @@ }, "must_exist": { "description": "If `true`, the alias must exist to perform the action.", + "default": false, "type": "boolean" } } @@ -51413,6 +51701,7 @@ }, "must_exist": { "description": "If `true`, the alias must exist to perform the action.", + "default": false, "type": "boolean" } } @@ -51428,6 +51717,7 @@ }, "must_exist": { "description": "If `true`, the alias must exist to perform the action.", + "default": false, "type": "boolean" } } @@ -51797,18 +52087,22 @@ "properties": { "max_chunk_size": { "description": "The maximum size of a chunk in words.\nThis value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).", + "default": 250.0, "type": "number" }, "overlap": { "description": "The number of overlapping words for chunks.\nIt is applicable 
only to a `word` chunking strategy.\nThis value cannot be higher than half the `max_chunk_size` value.", + "default": 100.0, "type": "number" }, "sentence_overlap": { "description": "The number of overlapping sentences for chunks.\nIt is applicable only for a `sentence` chunking strategy.\nIt can be either `1` or `0`.", + "default": 1.0, "type": "number" }, "strategy": { "description": "The chunking strategy: `sentence` or `word`.", + "default": "sentence", "type": "string" } } @@ -52099,6 +52393,7 @@ "properties": { "max_new_tokens": { "description": "For a `completion` task, it sets the maximum number for the output tokens to be generated.", + "default": 64.0, "type": "number" }, "temperature": { @@ -52291,6 +52586,7 @@ }, "max_new_tokens": { "description": "For a `completion` task, provide a hint for the maximum number of output tokens to be generated.", + "default": 64.0, "type": "number" }, "temperature": { @@ -52609,6 +52905,7 @@ "properties": { "enabled": { "description": "Turn on `adaptive_allocations`.", + "default": false, "type": "boolean" }, "max_number_of_allocations": { @@ -52626,6 +52923,7 @@ "properties": { "return_documents": { "description": "For a `rerank` task, return the document instead of only the index.", + "default": true, "type": "boolean" } } @@ -53160,6 +53458,7 @@ }, "url": { "description": "The URL endpoint to use for the requests.\nIt can be changed for testing purposes.", + "default": "https://api.openai.com/v1/embeddings.", "type": "string" } }, @@ -53262,6 +53561,7 @@ }, "return_documents": { "description": "Whether to return the source documents in the response.\nOnly for the `rerank` task type.", + "default": false, "type": "boolean" }, "top_k": { @@ -53270,6 +53570,7 @@ }, "truncation": { "description": "Whether to truncate the input texts to fit within the context length.", + "default": true, "type": "boolean" } } @@ -53519,6 +53820,7 @@ }, "deprecated": { "description": "Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.", + "default": false, "type": "boolean" }, "_meta": { @@ -53695,6 +53997,7 @@ }, "allow_duplicates": { "description": "If `false`, the processor does not append values already present in the field.", + "default": true, "type": "boolean" } }, @@ -53745,10 +54048,12 @@ }, "ignore_missing": { "description": "If `true` and field does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "indexed_chars": { "description": "The number of chars being used for extraction to prevent huge fields.\nUse `-1` for no limit.", + "default": 100000.0, "type": "number" }, "indexed_chars_field": { @@ -53766,6 +54071,7 @@ }, "remove_binary": { "description": "If true, the binary field will be removed from the document", + "default": false, "type": "boolean" }, "resource_name": { @@ -53792,6 +54098,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -53821,6 +54128,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "shape_type": { @@ -53882,10 +54190,12 @@ }, "seed": { "description": "Seed for the community ID hash. 
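The chunking defaults above (`strategy: sentence`, `max_chunk_size: 250`, `sentence_overlap: 1`) belong to the `chunking_settings` object of an inference endpoint. A rough sketch, assuming the `elasticsearch` service and an ELSER model id; the endpoint name and service settings are illustrative, not taken from this diff:

```
# Hypothetical endpoint id and service settings; chunking values match the documented defaults
PUT /_inference/sparse_embedding/my-elser-endpoint
{
  "service": "elasticsearch",
  "service_settings": {
    "num_allocations": 1,
    "num_threads": 1,
    "model_id": ".elser_model_2"
  },
  "chunking_settings": {
    "strategy": "sentence",
    "max_chunk_size": 250,
    "sentence_overlap": 1
  }
}
```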
Must be between 0 and 65535 (inclusive). The\nseed can prevent hash collisions between network domains, such as a staging\nand production network that use the same addressing scheme.", + "default": 0.0, "type": "number" }, "ignore_missing": { "description": "If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.", + "default": true, "type": "boolean" } } @@ -53905,6 +54215,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -53955,10 +54266,12 @@ }, "quote": { "description": "Quote used in CSV, has to be single character string.", + "default": "\"", "type": "string" }, "separator": { "description": "Separator used in CSV, has to be single character string.", + "default": ",", "type": "string" }, "target_fields": { @@ -53996,6 +54309,7 @@ }, "locale": { "description": "The locale to use when parsing the date, relevant when parsing month names or week days.\nSupports template snippets.", + "default": "ENGLISH", "type": "string" }, "target_field": { @@ -54003,10 +54317,12 @@ }, "timezone": { "description": "The timezone to use when parsing the date.\nSupports template snippets.", + "default": "UTC", "type": "string" }, "output_format": { "description": "The format to use when writing the date to target_field. Must be a valid\njava time pattern.", + "default": "yyyy-MM-dd'T'HH:mm:ss.SSSXXX", "type": "string" } }, @@ -54041,6 +54357,7 @@ }, "index_name_format": { "description": "The format to be used when printing the parsed date into the index name.\nA valid java time pattern is expected here.\nSupports template snippets.", + "default": "yyyy-MM-dd", "type": "string" }, "index_name_prefix": { @@ -54049,10 +54366,12 @@ }, "locale": { "description": "The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.", + "default": "ENGLISH", "type": "string" }, "timezone": { "description": "The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.", + "default": "UTC", "type": "string" } }, @@ -54073,6 +54392,7 @@ "properties": { "append_separator": { "description": "The character(s) that separate the appended fields.", + "default": "\"\"", "type": "string" }, "field": { @@ -54080,6 +54400,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "pattern": { @@ -54107,6 +54428,7 @@ }, "override": { "description": "Controls the behavior when there is already an existing nested object that conflicts with the expanded field.\nWhen `false`, the processor will merge conflicts by combining the old and the new values into an array.\nWhen `true`, the value from the expanded field will overwrite the existing value.", + "default": false, "type": "boolean" }, "path": { @@ -54143,14 +54465,17 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "max_matches": { "description": "The maximum number of matched documents to include under the configured target field.\nThe `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object.\nIn 
order to avoid documents getting too large, the maximum allowed value is 128.", + "default": 1.0, "type": "number" }, "override": { "description": "If processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.", + "default": true, "type": "boolean" }, "policy_name": { @@ -54223,6 +54548,7 @@ }, "ignore_missing": { "description": "If true, the processor ignores any missing fields. If all fields are\nmissing, the processor silently exits without modifying the document.", + "default": false, "type": "boolean" } }, @@ -54255,6 +54581,7 @@ }, "ignore_missing": { "description": "If `true`, the processor silently exits without changing the document if the `field` is `null` or missing.", + "default": false, "type": "boolean" }, "processor": { @@ -54278,6 +54605,7 @@ "properties": { "database_file": { "description": "The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.", + "default": "GeoLite2-City.mmdb", "type": "string" }, "field": { @@ -54285,10 +54613,12 @@ }, "first_only": { "description": "If `true`, only the first found IP location data will be returned, even if the field contains an array.", + "default": true, "type": "boolean" }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "properties": { @@ -54344,6 +54674,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_format": { @@ -54382,6 +54713,7 @@ "properties": { "database_file": { "description": "The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.", + "default": "GeoLite2-City.mmdb", "type": "string" }, "field": { @@ -54389,10 +54721,12 @@ }, "first_only": { "description": "If `true`, only the first found geoip data will be returned, even if the field contains an array.", + "default": true, "type": "boolean" }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "properties": { @@ -54426,6 +54760,7 @@ "properties": { "ecs_compatibility": { "description": "Must be disabled or v1. 
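Several ingest processors in these hunks fall back to documented defaults when options are omitted; the `geoip` processor, for example, uses `GeoLite2-City.mmdb` unless `database_file` is set. One way to observe that is the simulate API; the field name and document below are made up:

```
# Hypothetical document; geoip falls back to its default database_file
POST /_ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "geoip": { "field": "source_ip" } }
    ]
  },
  "docs": [
    { "_source": { "source_ip": "8.8.8.8" } }
  ]
}
```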
If v1, the processor uses patterns with Elastic\nCommon Schema (ECS) field names.", + "default": "disabled", "type": "string" }, "field": { @@ -54433,6 +54768,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "pattern_definitions": { @@ -54451,6 +54787,7 @@ }, "trace_match": { "description": "When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched.", + "default": false, "type": "boolean" } }, @@ -54477,6 +54814,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "pattern": { @@ -54512,6 +54850,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document,", + "default": false, "type": "boolean" }, "target_field": { @@ -54594,6 +54933,7 @@ }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" } } @@ -54603,10 +54943,12 @@ "properties": { "num_top_classes": { "description": "Specifies the number of top class predictions to return.", + "default": 0.0, "type": "number" }, "num_top_feature_importance_values": { "description": "Specifies the maximum number of feature importance values per document.", + "default": 0.0, "type": "number" }, "results_field": { @@ -54672,6 +55014,7 @@ "properties": { "add_to_root": { "description": "Flag that forces the parsed JSON to be added at the top level of the document.\n`target_field` must not be set when this option is chosen.", + "default": false, "type": "boolean" }, "add_to_root_conflict_strategy": { @@ -54679,6 +55022,7 @@ }, "allow_duplicate_keys": { "description": "When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys.\nInstead, the last encountered value for any duplicate key wins.", + "default": false, "type": "boolean" }, "field": { @@ -54725,6 +55069,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "include_keys": { @@ -54736,10 +55081,12 @@ }, "prefix": { "description": "Prefix to be added to extracted keys.", + "default": "null", "type": "string" }, "strip_brackets": { "description": "If `true`. 
strip brackets `()`, `<>`, `[]` as well as quotes `'` and `\"` from extracted values.", + "default": false, "type": "boolean" }, "target_field": { @@ -54779,6 +55126,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -54820,6 +55168,7 @@ }, "ignore_missing": { "description": "If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.", + "default": true, "type": "boolean" } } @@ -54839,6 +55188,7 @@ }, "ignore_missing_pipeline": { "description": "Whether to ignore missing pipelines instead of failing.", + "default": false, "type": "boolean" } }, @@ -54874,22 +55224,27 @@ }, "prefix": { "description": "Start a redacted section with this token", + "default": "<", "type": "string" }, "suffix": { "description": "End a redacted section with this token", + "default": ">", "type": "string" }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "skip_if_unlicensed": { "description": "If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document", + "default": false, "type": "boolean" }, "trace_redact": { "description": "If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -54917,6 +55272,7 @@ }, "ignore_missing": { "description": "If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.", + "default": true, "type": "boolean" } }, @@ -54942,6 +55298,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" } }, @@ -54964,6 +55321,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -55065,6 +55423,7 @@ }, "ignore_empty_value": { "description": "If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "media_type": { @@ -55073,6 +55432,7 @@ }, "override": { "description": "If `true` processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.", + "default": true, "type": "boolean" }, "value": { @@ -55148,10 +55508,12 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "preserve_trailing": { "description": "Preserves empty trailing fields, if any.", + "default": false, "type": "boolean" }, "separator": { @@ -55192,6 +55554,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -55217,6 +55580,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the 
document.", + "default": false, "type": "boolean" }, "target_field": { @@ -55242,6 +55606,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "target_field": { @@ -55267,14 +55632,17 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "keep_original": { "description": "If `true`, the processor copies the unparsed URI to `.original`.", + "default": true, "type": "boolean" }, "remove_if_successful": { "description": "If `true`, the processor removes the `field` after parsing the URI string.\nIf parsing fails, the processor does not remove the `field`.", + "default": false, "type": "boolean" }, "target_field": { @@ -55300,6 +55668,7 @@ }, "ignore_missing": { "description": "If `true` and `field` does not exist, the processor quietly exits without modifying the document.", + "default": false, "type": "boolean" }, "regex_file": { @@ -55311,6 +55680,18 @@ }, "properties": { "description": "Controls what properties are added to `target_field`.", + "default": [ + "name", + "major", + "minor", + "patch", + "build", + "os", + "os_name", + "os_major", + "os_minor", + "device" + ], "type": "array", "items": { "$ref": "#/components/schemas/ingest._types.UserAgentProperty" @@ -55318,6 +55699,7 @@ }, "extract_device_type": { "description": "Extracts device type from the user agent string on a best-effort basis.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -55850,6 +56232,7 @@ }, "use_null": { "description": "Defines whether a new series is used as the null series when there is no value for the by or partition fields.", + "default": false, "type": "boolean" } } @@ -55859,6 +56242,9 @@ "properties": { "actions": { "description": "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.", + "default": [ + "skip_result" + ], "type": "array", "items": { "$ref": "#/components/schemas/ml._types.RuleAction" @@ -56455,10 +56841,12 @@ }, "skip_result": { "description": "When true the model will not create results for this calendar period.", + "default": true, "type": "boolean" }, "skip_model_update": { "description": "When true the model will not be updated for this calendar period.", + "default": true, "type": "boolean" }, "force_time_shift": { @@ -56477,10 +56865,12 @@ "properties": { "from": { "description": "Skips the specified number of items.", + "default": 0.0, "type": "number" }, "size": { "description": "Specifies the maximum number of items to obtain.", + "default": 10000.0, "type": "number" } } @@ -56587,6 +56977,7 @@ }, "num_top_classes": { "description": "Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories.", + "default": 2.0, "type": "number" } } @@ -56610,6 +57001,7 @@ }, "early_stopping_enabled": { "description": "Advanced configuration option. 
Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable.", + "default": true, "type": "boolean" }, "eta": { @@ -56649,6 +57041,7 @@ }, "num_top_feature_importance_values": { "description": "Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs.", + "default": 0.0, "type": "number" }, "prediction_field_name": { @@ -56827,14 +57220,17 @@ "properties": { "compute_feature_influence": { "description": "Specifies whether the feature influence calculation is enabled.", + "default": true, "type": "boolean" }, "feature_influence_threshold": { "description": "The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1.", + "default": 0.1, "type": "number" }, "method": { "description": "The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score.", + "default": "ensemble", "type": "string" }, "n_neighbors": { @@ -56847,6 +57243,7 @@ }, "standardization_enabled": { "description": "If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`.", + "default": true, "type": "boolean" } } @@ -56861,6 +57258,7 @@ "properties": { "loss_function": { "description": "The loss function used during regression. 
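The outlier detection defaults above (`compute_feature_influence: true`, `feature_influence_threshold: 0.1`, `method: ensemble`) all apply when the analysis object is left empty. A minimal sketch with hypothetical index and job names:

```
# Hypothetical names; an empty outlier_detection object picks up all documented defaults
PUT /_ml/data_frame/analytics/my-outlier-job
{
  "source": { "index": "my-data" },
  "dest": { "index": "my-data-outliers" },
  "analysis": { "outlier_detection": {} }
}
```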
Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss).", + "default": "mse", "type": "string" }, "loss_function_parameter": { @@ -57161,10 +57559,12 @@ "properties": { "compute_feature_influence": { "description": "Specifies whether the feature influence calculation is enabled.", + "default": true, "type": "boolean" }, "feature_influence_threshold": { "description": "The minimum outlier score that a document needs to have in order to calculate its feature influence score.\nValue range: 0-1", + "default": 0.1, "type": "number" }, "method": { @@ -57181,6 +57581,7 @@ }, "standardization_enabled": { "description": "If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i).", + "default": true, "type": "boolean" } } @@ -57528,10 +57929,12 @@ }, "ignore_unavailable": { "description": "If true, missing or closed indices are not included in the response.", + "default": false, "type": "boolean" }, "ignore_throttled": { "description": "If true, concrete, expanded or aliased indices are ignored when frozen.", + "default": true, "type": "boolean" } } @@ -57882,6 +58285,7 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.\nIt specifies a period of time (in days) after which only the first snapshot per day is retained.\nThis period is relative to the timestamp of the most recent snapshot for this job.\nValid values range from 0 to `model_snapshot_retention_days`.", + "default": 1.0, "type": "number" }, "data_description": { @@ -57954,6 +58358,7 @@ "properties": { "categorization_examples_limit": { "description": "The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization.", + "default": 4.0, "type": "number" }, "model_memory_limit": { @@ -57999,6 +58404,7 @@ }, "time_format": { "description": "The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. 
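`data_description.time_format` now documents its `epoch` default. Overriding it with `epoch_ms` when creating an anomaly detection job looks roughly like this; the job name, field names, and detector are placeholders:

```
# Hypothetical names; overrides the documented default time_format of "epoch"
PUT /_ml/anomaly_detectors/my-job
{
  "analysis_config": {
    "bucket_span": "15m",
    "detectors": [
      { "function": "mean", "field_name": "responsetime" }
    ]
  },
  "data_description": {
    "time_field": "timestamp",
    "time_format": "epoch_ms"
  }
}
```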
If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails.", + "default": "epoch", "type": "string" }, "field_delimiter": { @@ -58011,11 +58417,13 @@ "properties": { "annotations_enabled": { "description": "If true, enables calculation and storage of the model change annotations for each entity that is being analyzed.", + "default": true, "x-state": "Generally available", "type": "boolean" }, "enabled": { "description": "If true, enables calculation and storage of the model bounds for each entity that is being analyzed.", + "default": false, "type": "boolean" }, "terms": { @@ -58298,14 +58706,17 @@ "properties": { "do_lower_case": { "description": "Should the tokenizer lower case the text", + "default": false, "type": "boolean" }, "max_sequence_length": { "description": "Maximum input sequence length for the model", + "default": 512.0, "type": "number" }, "span": { "description": "Tokenization spanning options. Special value of -1 indicates no spanning takes place", + "default": -1.0, "type": "number" }, "truncate": { @@ -58313,6 +58724,7 @@ }, "with_special_tokens": { "description": "Is tokenization completed with special tokens", + "default": true, "type": "boolean" } } @@ -58336,6 +58748,7 @@ "properties": { "add_prefix_space": { "description": "Should the tokenizer prefix input with a space character", + "default": false, "type": "boolean" } } @@ -58372,6 +58785,7 @@ }, "hypothesis_template": { "description": "Hypothesis template used when tokenizing labels for prediction", + "default": "\"This example is {}.\"", "type": "string" }, "classification_labels": { @@ -58387,6 +58801,7 @@ }, "multi_label": { "description": "Indicates if more than one true label exists.", + "default": false, "type": "boolean" }, "labels": { @@ -59552,6 +59967,7 @@ }, "scroll_size": { "description": "The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default.", + "default": 1000.0, "type": "number" } } @@ -59561,6 +59977,7 @@ "properties": { "allow_lazy_open": { "description": "Advanced configuration option. 
Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node.", + "default": false, "type": "boolean" }, "analysis_config": { @@ -59577,6 +59994,7 @@ }, "daily_model_snapshot_retention_after_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.\nIt specifies a period of time (in days) after which only the first snapshot per day is retained.\nThis period is relative to the timestamp of the most recent snapshot for this job.", + "default": 1.0, "type": "number" }, "data_description": { @@ -59608,6 +60026,7 @@ }, "model_snapshot_retention_days": { "description": "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.\nIt specifies the maximum period of time (in days) that snapshots are retained.\nThis period is relative to the timestamp of the most recent snapshot for this job.\nThe default value is `10`, which means snapshots ten days older than the newest snapshot are deleted.", + "default": 10.0, "type": "number" }, "renormalization_window_days": { @@ -59738,6 +60157,7 @@ }, "use_null": { "description": "Defines whether a new series is used as the null series when there is no value for the by or partition fields.", + "default": false, "type": "boolean" } }, @@ -60351,6 +60771,7 @@ "properties": { "explain": { "description": "If `true`, returns detailed information about score calculation as part of each hit.", + "default": false, "type": "boolean" }, "id": { @@ -60365,6 +60786,7 @@ }, "profile": { "description": "If `true`, the query execution is profiled.", + "default": false, "type": "boolean" }, "source": { @@ -60390,6 +60812,7 @@ }, "field_statistics": { "description": "If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.", + "default": true, "type": "boolean" }, "filter": { @@ -60397,14 +60820,17 @@ }, "offsets": { "description": "If `true`, the response includes term offsets.", + "default": true, "type": "boolean" }, "payloads": { "description": "If `true`, the response includes term payloads.", + "default": true, "type": "boolean" }, "positions": { "description": "If `true`, the response includes term positions.", + "default": true, "type": "boolean" }, "routing": { @@ -60412,6 +60838,7 @@ }, "term_statistics": { "description": "If true, the response includes term frequency and document frequency.", + "default": false, "type": "boolean" }, "version": { @@ -60431,6 +60858,7 @@ }, "max_num_terms": { "description": "The maximum number of terms that must be returned per field.", + "default": 25.0, "type": "number" }, "max_term_freq": { @@ -60439,18 +60867,22 @@ }, "max_word_length": { "description": "The maximum word length above which words will be ignored.\nDefaults to unbounded.", + "default": 0.0, "type": "number" }, "min_doc_freq": { "description": "Ignore terms which do not occur in at least this many docs.", + "default": 1.0, "type": "number" }, "min_term_freq": { "description": "Ignore words with less than this frequency in the source doc.", + "default": 1.0, "type": "number" }, "min_word_length": { "description": "The minimum word length below which words will be ignored.", + "default": 0.0, "type": "number" } } @@ -60834,6 +61266,7 @@ "properties": { "ignore_unlabeled": { "description": "Controls how unlabeled documents in the search results are counted. 
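The term vectors flags in this region default to `true` for `field_statistics`, `offsets`, `payloads`, and `positions`, and to `false` for `term_statistics`. Flipping two of those defaults, against a hypothetical index and document:

```
# Hypothetical index and document id; flips two of the documented defaults
GET /my-index/_termvectors/1
{
  "fields": [ "text" ],
  "term_statistics": true,
  "field_statistics": false
}
```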
If set to true, unlabeled documents are ignored and neither count as relevant or irrelevant. Set to false (the default), they are treated as irrelevant.", + "default": false, "type": "boolean" } } @@ -60850,6 +61283,7 @@ "properties": { "relevant_rating_threshold": { "description": "Sets the rating threshold above which documents are considered to be \"relevant\".", + "default": 1.0, "type": "number" } } @@ -60861,6 +61295,7 @@ "properties": { "k": { "description": "Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query.", + "default": 10.0, "type": "number" } } @@ -60898,6 +61333,7 @@ "properties": { "normalize": { "description": "If set to true, this metric will calculate the Normalized DCG.", + "default": false, "type": "boolean" } } @@ -61057,6 +61493,7 @@ }, "size": { "description": "The number of documents to index per batch.\nUse it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB.", + "default": 1000.0, "type": "number" }, "slice": { @@ -61719,6 +62156,7 @@ }, "allow_restricted_indices": { "description": "This needs to be set to true if the patterns in the names field should cover system indices.", + "default": false, "type": "boolean" } }, @@ -62064,10 +62502,12 @@ }, "other_bucket_key": { "description": "The key with which the other bucket is returned.", + "default": "_other_", "type": "string" }, "keyed": { "description": "By default, the named filters aggregation returns the buckets as an object.\nSet to `false` to return the buckets as an array of objects.", + "default": true, "type": "boolean" } } @@ -62587,14 +63027,17 @@ "properties": { "align_checkpoints": { "description": "Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align\ncheckpoint ranges with the date histogram interval when date histogram is specified as a group source in the\ntransform config. As a result, less document updates in the destination index will be performed thus improving\noverall performance.", + "default": true, "type": "boolean" }, "dates_as_epoch_millis": { "description": "Defines if dates in the ouput should be written as ISO formatted string or as millis since epoch. epoch_millis was\nthe default for transforms created before version 7.11. For compatible output set this value to `true`.", + "default": false, "type": "boolean" }, "deduce_mappings": { "description": "Specifies whether the transform should deduce the destination index mappings from the transform configuration.", + "default": true, "type": "boolean" }, "docs_per_second": { @@ -62603,10 +63046,12 @@ }, "max_page_search_size": { "description": "Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker\nexceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the\nmaximum is `65,536`.", + "default": 500.0, "type": "number" }, "unattended": { "description": "If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case\nof an error which means the transform never fails. 
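The ranking evaluation hunks document `k: 10` and `relevant_rating_threshold: 1` as metric defaults. A minimal `_rank_eval` sketch using the precision metric; the index, query, and rating are invented:

```
# Hypothetical index and rating; metric parameters match the documented defaults
GET /my-index/_rank_eval
{
  "requests": [
    {
      "id": "query-1",
      "request": { "query": { "match": { "title": "elasticsearch" } } },
      "ratings": [ { "_index": "my-index", "_id": "1", "rating": 1 } ]
    }
  ],
  "metric": {
    "precision": {
      "k": 10,
      "relevant_rating_threshold": 1,
      "ignore_unlabeled": false
    }
  }
}
```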
Setting the number of retries other than infinite fails in\nvalidation.", + "default": false, "x-state": "Generally available", "type": "boolean" } @@ -64662,6 +65107,23 @@ "count", "api_keys" ] + }, + "examples": { + "QueryApiKeysResponseExample1": { + "summary": "Query API keys by ID", + "description": "A successful response from `GET /_security/_query/api_key?with_limited_by=true`. The `limited_by` details are the owner user's permissions associated with the API key. It is a point-in-time snapshot captured at creation and subsequent updates. An API key's effective permissions are an intersection of its assigned privileges and the owner user's permissions.\n", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\",\n \"name\": \"application-key-1\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"my-application\"\n },\n \"role_descriptors\": { },\n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample2": { + "summary": "Query API keys with pagination", + "description": "An abbreviated response from `GET /_security/_query/api_key` that contains a list of matched API keys along with their sort values. The first sort value is creation time, which is displayed in `date_time` format. The second sort value is the API key name.\n", + "value": "{\n \"total\": 100,\n \"count\": 10,\n \"api_keys\": [\n {\n \"id\": \"CLXgVnsBOGkf8IyjcXU7\",\n \"name\": \"app1-key-79\",\n \"creation\": 1629250154811,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:14.811Z\", \n \"app1-key-79\" \n ]\n },\n {\n \"id\": \"BrXgVnsBOGkf8IyjbXVB\",\n \"name\": \"app1-key-78\",\n \"creation\": 1629250153794,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:13.794Z\",\n \"app1-key-78\"\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample3": { + "summary": "Query all API keys", + "description": "A successful response from `GET /_security/_query/api_key`. It includes the role descriptors that are assigned to each API key when it was created or last updated. Note that an API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of the owner user's permissions. 
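The transform settings covered here (`align_checkpoints`, `deduce_mappings`, `max_page_search_size`, `unattended`) are all passed via the `settings` object at transform creation. A rough sketch with hypothetical source, destination, and field names:

```
# Hypothetical names; max_page_search_size raised above its documented default of 500
PUT /_transform/my-transform
{
  "source": { "index": "my-source" },
  "dest": { "index": "my-dest" },
  "pivot": {
    "group_by": { "user": { "terms": { "field": "user.id" } } },
    "aggregations": { "avg_price": { "avg": { "field": "price" } } }
  },
  "settings": {
    "max_page_search_size": 1000,
    "unattended": false
  }
}
```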
An empty role descriptors object means the API key inherits the owner user's permissions.\n", + "value": "{\n \"total\": 3,\n \"count\": 3,\n \"api_keys\": [ \n {\n \"id\": \"nkvrGXsB8w290t56q3Rg\",\n \"name\": \"my-api-key-1\",\n \"creation\": 1628227480421,\n \"expiration\": 1629091480421,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"realm_type\": \"reserved\",\n \"metadata\": {\n \"letter\": \"a\"\n },\n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"oEvrGXsB8w290t5683TI\",\n \"name\": \"my-api-key-2\",\n \"creation\": 1628227498953,\n \"expiration\": 1628313898953,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"metadata\": {\n \"letter\": \"b\"\n },\n \"role_descriptors\": { } \n }\n ]\n}" + } } } } @@ -70373,6 +70835,7 @@ }, "explain": { "description": "If true, returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -70384,6 +70847,7 @@ }, "from": { "description": "Starting document offset. By default, you cannot page through more than 10,000\nhits using the from and size parameters. To page through more hits, use the\nsearch_after parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -70464,6 +70928,7 @@ }, "size": { "description": "The number of hits to return. By default, you cannot page through more\nthan 10,000 hits using the from and size parameters. To page through more\nhits, use the search_after parameter.", + "default": 10.0, "type": "number" }, "slice": { @@ -70487,6 +70952,7 @@ }, "terminate_after": { "description": "Maximum number of documents to collect for each shard. If a query reaches this\nlimit, Elasticsearch terminates the query early. Elasticsearch collects documents\nbefore sorting. Defaults to 0, which does not terminate query execution early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -70495,10 +70961,12 @@ }, "track_scores": { "description": "If true, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If true, returns document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -70759,10 +71227,12 @@ }, "allow_partial_search_results": { "description": "Allow query execution also in case of shard failures.\nIf true, the query will keep running and will return results based on the available shards.\nFor sequences, the behavior can be further refined using allow_partial_sequence_results", + "default": true, "type": "boolean" }, "allow_partial_sequence_results": { "description": "This flag applies only to sequences and has effect only if allow_partial_search_results=true.\nIf true, the sequence query will return results based on the available shards, ignoring the others.\nIf false, the sequence query will return successfully, but will always have empty results.", + "default": false, "type": "boolean" }, "size": { @@ -70790,6 +71260,7 @@ }, "max_samples_per_key": { "description": "By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. 
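The search body defaults above (`from: 0`, `size: 10`, `terminate_after: 0`) mean a bare query returns the first ten hits without early termination. Explicit paging within the 10,000-hit window, for a hypothetical index:

```
# Hypothetical index; requests the third page of ten hits
GET /my-index/_search
{
  "from": 20,
  "size": 10,
  "query": { "match_all": {} }
}
```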
Use the `size`\nparameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the\n`max_samples_per_key` parameter. Pipes are not supported for sample queries.", + "default": 1.0, "type": "number" } }, @@ -70940,6 +71411,7 @@ }, "explain": { "description": "If `true`, the response includes token attributes and additional details.", + "default": false, "type": "boolean" }, "field": { @@ -71140,6 +71612,7 @@ }, "numeric_detection": { "description": "Automatically map strings into numeric data types for all fields.", + "default": false, "type": "boolean" }, "properties": { @@ -71455,6 +71928,7 @@ "properties": { "allow_no_match": { "description": "Refer to the description for the `allow_no_match` query parameter.", + "default": true, "type": "boolean" }, "bucket_span": { @@ -71465,6 +71939,7 @@ }, "exclude_interim": { "description": "Refer to the description for the `exclude_interim` query parameter.", + "default": false, "type": "boolean" }, "overall_score": { @@ -71483,6 +71958,7 @@ }, "top_n": { "description": "Refer to the description for the `top_n` query parameter.", + "default": 1.0, "type": "number" } } @@ -71791,6 +72267,7 @@ }, "explain": { "description": "If `true`, the request returns detailed information about score computation as part of a hit.", + "default": false, "type": "boolean" }, "ext": { @@ -71802,6 +72279,7 @@ }, "from": { "description": "The starting document offset, which must be non-negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "highlight": { @@ -71862,6 +72340,7 @@ }, "profile": { "description": "Set to `true` to return detailed timing information about the execution of individual components in a search request.\nNOTE: This is a debugging tool and adds significant overhead to search execution.", + "default": false, "type": "boolean" }, "query": { @@ -71896,6 +72375,7 @@ }, "size": { "description": "The number of hits to return, which must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` property.", + "default": 10.0, "type": "number" }, "slice": { @@ -71919,6 +72399,7 @@ }, "terminate_after": { "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this property to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this property for requests that target data streams with backing indices across multiple data tiers.\n\nIf set to `0` (default), the query does not terminate early.", + "default": 0.0, "type": "number" }, "timeout": { @@ -71927,10 +72408,12 @@ }, "track_scores": { "description": "If `true`, calculate and return document scores, even if the scores are not used for sorting.", + "default": false, "type": "boolean" }, "version": { "description": "If `true`, the request returns the document version as part of a hit.", + "default": false, "type": "boolean" }, "seq_no_primary_term": { @@ -72017,14 +72500,17 @@ }, "buffer": { "description": "The size, in pixels, of a clipping buffer outside the tile. 
This allows renderers\nto avoid outline artifacts from geometries that extend past the extent of the tile.", + "default": 5.0, "type": "number" }, "exact_bounds": { "description": "If `false`, the meta layer's feature is the bounding box of the tile.\nIf `true`, the meta layer's feature is a bounding box resulting from a\n`geo_bounds` aggregation. The aggregation runs on values that intersect\nthe `//` tile with `wrap_longitude` set to `false`. The resulting\nbounding box may be larger than the vector tile.", + "default": false, "type": "boolean" }, "extent": { "description": "The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.", + "default": 4096.0, "type": "number" }, "fields": { @@ -72035,6 +72521,7 @@ }, "grid_precision": { "description": "Additional zoom levels available through the aggs layer. For example, if `` is `7`\nand `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon't include the aggs layer.", + "default": 8.0, "type": "number" }, "grid_type": { @@ -72048,6 +72535,7 @@ }, "size": { "description": "The maximum number of features to return in the hits layer. Accepts 0-10000.\nIf 0, results don't include the hits layer.", + "default": 10000.0, "type": "number" }, "sort": { @@ -72079,6 +72567,7 @@ "properties": { "explain": { "description": "If `true`, returns detailed information about score calculation as part of each hit.\nIf you specify both this and the `explain` query parameter, the API uses only the query parameter.", + "default": false, "type": "boolean" }, "id": { @@ -72093,6 +72582,7 @@ }, "profile": { "description": "If `true`, the query execution is profiled.", + "default": false, "type": "boolean" }, "source": { @@ -72276,6 +72766,7 @@ }, "from": { "description": "The starting document offset.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "sort": { @@ -72283,6 +72774,7 @@ }, "size": { "description": "The number of hits to return.\nIt must not be negative.\nThe `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 10.0, "type": "number" }, "search_after": { @@ -72295,6 +72787,16 @@ "summary": "Query API keys by ID", "description": "Run `GET /_security/_query/api_key?with_limited_by=true` to retrieve an API key by ID.", "value": "{\n \"query\": {\n \"ids\": {\n \"values\": [\n \"VuaCfGcBCdbkQm-e5aOx\"\n ]\n }\n }\n}" + }, + "QueryApiKeysRequestExample2": { + "summary": "Query API keys with pagination", + "description": "Run `GET /_security/_query/api_key`. Use a `bool` query to issue complex logical conditions and use `from`, `size`, and `sort` to help paginate the result. For example, the API key name must begin with `app1-key-` and must not be `app1-key-01`. It must be owned by a username with the wildcard pattern `org-*-user` and the `environment` metadata field must have a `production` value. The offset to begin the search result is the twentieth (zero-based index) API key. The page size of the response is 10 API keys. 
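The vector tile parameters documented here default to `extent: 4096`, `grid_precision: 8`, and a hits layer capped at 10,000 features. The `_mvt` endpoint takes zoom/x/y in the path; the index, field, and tile coordinates below are placeholders:

```
# Hypothetical geo index and field; zoom 7, tile x=37, y=48
GET /my-geo-index/_mvt/location/7/37/48
{
  "grid_precision": 8,
  "size": 100
}
```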
The result is first sorted by creation date in descending order, then by name in ascending order.\n", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\" \n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\" \n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\" \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\" \n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\" \n }\n }\n ]\n }\n },\n \"from\": 20, \n \"size\": 10, \n \"sort\": [ \n { \"creation\": { \"order\": \"desc\", \"format\": \"date_time\" } },\n \"name\"\n ]\n}" + }, + "QueryApiKeysRequestExample3": { + "summary": "Query API keys by name", + "description": "Run `GET /_security/_query/api_key` to retrieve the API key by name.", + "value": "{\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n}" } } } @@ -72311,6 +72813,7 @@ }, "from": { "description": "The starting document offset.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 0.0, "type": "number" }, "sort": { @@ -72318,6 +72821,7 @@ }, "size": { "description": "The number of hits to return.\nIt must not be negative.\nBy default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.\nTo page through more hits, use the `search_after` parameter.", + "default": 10.0, "type": "number" }, "search_after": { @@ -72348,6 +72852,7 @@ "properties": { "allow_partial_search_results": { "description": "If `true`, the response has partial results when there are shard request timeouts or shard failures.\nIf `false`, the API returns an error with no partial results.", + "default": false, "type": "boolean" }, "catalog": { @@ -72359,6 +72864,7 @@ "url": "https://www.elastic.co/docs/explore-analyze/query-filter/languages/sql-rest-columnar" }, "description": "If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results.\nThe API supports this parameter only for CBOR, JSON, SMILE, and YAML responses.", + "default": false, "type": "boolean" }, "cursor": { @@ -72367,10 +72873,12 @@ }, "fetch_size": { "description": "The maximum number of rows (or entries) to return in one response.", + "default": 1000.0, "type": "number" }, "field_multi_value_leniency": { "description": "If `false`, the API returns an exception when encountering multiple values for a field.\nIf `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results.", + "default": false, "type": "boolean" }, "filter": { @@ -72378,6 +72886,7 @@ }, "index_using_frozen": { "description": "If `true`, the search can run on frozen indices.", + "default": false, "type": "boolean" }, "keep_alive": { @@ -72385,6 +72894,7 @@ }, "keep_on_completion": { "description": "If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter.\nIf `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`.", + "default": false, "type": "boolean" }, "page_timeout": { @@ -72436,6 +72946,7 @@ "properties": { "fetch_size": { "description": "The maximum number of rows (or entries) to return in one response.", + "default": 1000.0, "type": "number" }, "filter": { @@ -72475,6 +72986,7 @@ }, "size": { 
"description": "The number of matching terms to return.", + "default": 10.0, "type": "number" }, "timeout": { @@ -72482,6 +72994,7 @@ }, "case_insensitive": { "description": "When `true`, the provided search string is matched against index terms without case sensitivity.", + "default": false, "type": "boolean" }, "index_filter": { @@ -72534,22 +73047,27 @@ }, "field_statistics": { "description": "If `true`, the response includes:\n\n* The document count (how many documents contain this field).\n* The sum of document frequencies (the sum of document frequencies for all terms in this field).\n* The sum of total term frequencies (the sum of total term frequencies of each term in this field).", + "default": true, "type": "boolean" }, "offsets": { "description": "If `true`, the response includes term offsets.", + "default": true, "type": "boolean" }, "payloads": { "description": "If `true`, the response includes term payloads.", + "default": true, "type": "boolean" }, "positions": { "description": "If `true`, the response includes term positions.", + "default": true, "type": "boolean" }, "term_statistics": { "description": "If `true`, the response includes:\n\n* The total term frequency (how often a term occurs in all documents).\n* The document frequency (the number of documents containing the current term).\n\nBy default these values are not returned since term statistics can have a serious performance impact.", + "default": false, "type": "boolean" }, "routing": { diff --git a/output/schema/schema.json b/output/schema/schema.json index c35818cae9..8adb75255a 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -14682,7 +14682,7 @@ "description": "Update a data frame analytics job.", "docId": "update-dfanalytics", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-data-frame-analytics", + "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics", "extPreviousVersionDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/update-dfanalytics.html", "name": "ml.update_data_frame_analytics", "privileges": { @@ -16684,9 +16684,11 @@ "description": "", "version": "8.11.0" }, - "description": "Search rolled-up data.\nThe rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\n**Searching both historical rollup and non-rollup data**\n\nThe rollup search API has the capability to search across both \"live\" non-rollup data and the aggregated rollup data.\nThis is done by simply adding the live indices to the URI. 
For example:\n\n```\nGET sensor-1,sensor_rollup/_rollup_search\n{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}\n```\n\nThe rollup search endpoint does two things when the search runs:\n\n* The original request is sent to the non-rollup index unaltered.\n* A rewritten version of the original request is sent to the rollup index.\n\nWhen the two responses are received, the endpoint rewrites the rollup response and merges the two together.\nDuring the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.", + "description": "Search rolled-up data.\nThe rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\nFor more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.", "docId": "rollup-search", "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-rollup-search", + "extDocId": "rollup-examples", + "extDocUrl": "https://www.elastic.co/docs/manage-data/lifecycle/rollup/getting-started-api#historical-only-search-example", "extPreviousVersionDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/rollup-search.html", "name": "rollup.rollup_search", "request": { @@ -22161,7 +22163,7 @@ }, "description": "Create or update a synonym set.\nSynonyms sets are limited to a maximum of 10,000 synonym rules per set.\nIf you need to manage more synonym rules, you can create multiple synonym sets.\n\nWhen an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.\nThis is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.\n\nFor practical examples of how to create or update a synonyms set, refer to the External documentation.", "docId": "synonym-set-create", - "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym", + "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym", "extDocId": "synonym-api-examples", "extDocUrl": "https://www.elastic.co/docs/solutions/search/full-text/create-update-synonyms-api-example", "extPreviousVersionDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-synonyms-set.html", @@ -23188,10 +23190,12 @@ "stability": "stable" } }, - "description": "Update a document.\n\nUpdate a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing 
document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).", + "description": "Update a document.\n\nUpdate a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\nFor usage examples such as partial updates, upserts, and scripted updates, see the External documentation.", "docId": "docs-update", "docTag": "document", "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update", + "extDocId": "update-document", + "extDocUrl": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-document", "extPreviousVersionDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update.html", "name": "update", "privileges": { @@ -23234,10 +23238,12 @@ "stability": "stable" } }, - "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in 
the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. 
Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", + "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or 
update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Refreshing shards**\n\nSpecifying the `refresh` parameter refreshes all shards once the request completes.\nThis is different to the update API's `refresh` parameter, which causes only the shard\nthat received the request to be refreshed. Unlike the update API, it does not support\n`wait_for`.\n\n**Running update by query asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch\nperforms some preflight checks, launches the request, and returns a\n[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.\n\n**Waiting for active shards**\n\n`wait_for_active_shards` controls how many copies of a shard must be active\nbefore proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)\nfor details. `timeout` controls how long each write request waits for unavailable\nshards to become available. Both work exactly the way they work in the\n[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also\nspecify the `scroll` parameter to control how long it keeps the search context\nalive, for example `?scroll=10m`. The default is 5 minutes.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. 
These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\nRefer to the linked documentation for examples of how to update documents using the `_update_by_query` API:", "docId": "docs-update-by-query", "docTag": "document", "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update-by-query", + "extDocId": "update-by-query", + "extDocUrl": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-by-query-api", "extPreviousVersionDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/docs-update-by-query.html", "name": "update_by_query", "privileges": { @@ -35206,6 +35212,342 @@ "method_request": "POST _reindex", "summary": "Reindex multiple sources", "value": "{\n \"source\": {\n \"index\": [\"my-index-000001\", \"my-index-000002\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000002\"\n }\n}" + }, + "ReindexRequestExample10": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"metricbeat-*\"\n },\n dest={\n \"index\": \"metricbeat\"\n },\n script={\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"metricbeat-*\",\n },\n dest: {\n index: \"metricbeat\",\n },\n script: {\n lang: \"painless\",\n source:\n \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 
'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"metricbeat-*\",\n ],\n \"dest\" => [\n \"index\" => \"metricbeat\",\n ],\n \"script\" => [\n \"lang\" => \"painless\",\n \"source\" => \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"metricbeat-*\"},\"dest\":{\"index\":\"metricbeat\"},\"script\":{\"lang\":\"painless\",\"source\":\"ctx._index = '\"'\"'metricbeat-'\"'\"' + (ctx._index.substring('\"'\"'metricbeat-'\"'\"'.length(), ctx._index.length())) + '\"'\"'-1'\"'\"'\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "You can use Painless to reindex daily indices to apply a new template to the existing documents. The script extracts the date from the index name and creates a new index with `-1` appended. For example, all data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`.\n", + "method_request": "POST _reindex", + "summary": "Reindex with Painless", + "value": "{\n \"source\": {\n \"index\": \"metricbeat-*\"\n },\n \"dest\": {\n \"index\": \"metricbeat\"\n },\n \"script\": {\n \"lang\": \"painless\",\n \"source\": \"ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'\"\n }\n}" + }, + "ReindexRequestExample11": { + "alternatives": [ + { + "code": "resp = client.reindex(\n max_docs=10,\n source={\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\": {\n \"random_score\": {},\n \"min_score\": 0.9\n }\n }\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n max_docs: 10,\n source: {\n index: \"my-index-000001\",\n query: {\n function_score: {\n random_score: {},\n min_score: 0.9,\n },\n },\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\": {\n \"random_score\": {},\n \"min_score\": 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"max_docs\" => 10,\n \"source\" => [\n \"index\" => \"my-index-000001\",\n \"query\" => [\n \"function_score\" => [\n \"random_score\" => new ArrayObject([]),\n \"min_score\" => 0.9,\n ],\n ],\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"max_docs\":10,\"source\":{\"index\":\"my-index-000001\",\"query\":{\"function_score\":{\"random_score\":{},\"min_score\":0.9}}},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "Run `POST _reindex` to extract a random subset of the source for testing. 
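A rough sizing aid for the random-subset example above, under an assumption of mine (not stated in the upstream docs) that `random_score` is approximately uniform on [0, 1): the fraction of documents clearing `min_score` is about `1 - min_score`, and `max_docs` then caps the copy.

```python
# Back-of-envelope for the random-subset reindex (assumes random_score is
# roughly uniform in [0, 1), an approximation, not a documented guarantee).
source_doc_count = 50_000      # hypothetical size of my-index-000001
min_score = 0.9
max_docs = 10

expected_matches = source_doc_count * (1 - min_score)  # ~5,000 candidates
copied = min(expected_matches, max_docs)               # max_docs wins: 10 docs
print(f"~{expected_matches:.0f} docs clear min_score; {copied:.0f} are copied")
```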
You might need to adjust the `min_score` value depending on the relative amount of data extracted from source.\n", + "method_request": "POST _reindex", + "summary": "Reindex a random subset", + "value": "{\n \"max_docs\": 10,\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"function_score\" : {\n \"random_score\" : {},\n \"min_score\" : 0.9\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample12": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"my-index-000001\"\n },\n dest={\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n script={\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"my-index-000001\",\n },\n dest: {\n index: \"my-new-index-000001\",\n version_type: \"external\",\n },\n script: {\n source:\n \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n lang: \"painless\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"my-index-000001\",\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n \"version_type\" => \"external\",\n ],\n \"script\" => [\n \"source\" => \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\" => \"painless\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"my-index-000001\"},\"dest\":{\"index\":\"my-new-index-000001\",\"version_type\":\"external\"},\"script\":{\"source\":\"if (ctx._source.foo == '\"'\"'bar'\"'\"') {ctx._version++; ctx._source.remove('\"'\"'foo'\"'\"')}\",\"lang\":\"painless\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "Run `POST _reindex` to modify documents during reindexing. 
This example bumps the version of the source document.\n", + "method_request": "POST _reindex", + "summary": "Reindex modified documents", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\",\n \"version_type\": \"external\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}\",\n \"lang\": \"painless\"\n }\n}" + }, + "ReindexRequestExample13": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n remote: {\n host: \"http://otherhost:9200\",\n username: \"user\",\n password: \"pass\",\n },\n index: \"my-index-000001\",\n query: {\n match: {\n test: \"data\",\n },\n },\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"remote\" => [\n \"host\" => \"http://otherhost:9200\",\n \"username\" => \"user\",\n \"password\" => \"pass\",\n ],\n \"index\" => \"my-index-000001\",\n \"query\" => [\n \"match\" => [\n \"test\" => \"data\",\n ],\n ],\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"remote\":{\"host\":\"http://otherhost:9200\",\"username\":\"user\",\"password\":\"pass\"},\"index\":\"my-index-000001\",\"query\":{\"match\":{\"test\":\"data\"}}},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "When using Elastic Cloud, you can run `POST _reindex` and authenticate against a remote cluster with an API key.\n", + "method_request": "POST _reindex", + "summary": "Reindex from remote on Elastic Cloud", + "value": "{\n \"source\": {\n \"remote\": {\n \"host\": \"http://otherhost:9200\",\n \"username\": \"user\",\n \"password\": \"pass\"\n },\n \"index\": \"my-index-000001\",\n \"query\": {\n \"match\": {\n \"test\": \"data\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample2": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"my-index-000001\",\n slice: {\n id: 0,\n max: 2,\n },\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": 
\"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"my-index-000001\",\n \"slice\" => [\n \"id\" => 0,\n \"max\" => 2,\n ],\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"my-index-000001\",\"slice\":{\"id\":0,\"max\":2}},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "Run `POST _reindex` to slice a reindex request manually. Provide a slice ID and total number of slices to each request.\n", + "method_request": "POST _reindex", + "summary": "Manual slicing", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"slice\": {\n \"id\": 0,\n \"max\": 2\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample3": { + "alternatives": [ + { + "code": "resp = client.reindex(\n slices=\"5\",\n refresh=True,\n source={\n \"index\": \"my-index-000001\"\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n slices: 5,\n refresh: \"true\",\n source: {\n index: \"my-index-000001\",\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n slices: \"5\",\n refresh: \"true\",\n body: {\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"slices\" => \"5\",\n \"refresh\" => \"true\",\n \"body\" => [\n \"source\" => [\n \"index\" => \"my-index-000001\",\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"my-index-000001\"},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex?slices=5&refresh\"", + "language": "curl" + } + ], + "description": "Run `POST _reindex?slices=5&refresh` to automatically parallelize using sliced scroll to slice on `_id`. 
The `slices` parameter specifies the number of slices to use.\n", + "method_request": "POST _reindex?slices=5&refresh", + "summary": "Automatic slicing", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample4": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n dest={\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"source\",\n query: {\n match: {\n company: \"cat\",\n },\n },\n },\n dest: {\n index: \"dest\",\n routing: \"=cat\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"source\",\n \"query\" => [\n \"match\" => [\n \"company\" => \"cat\",\n ],\n ],\n ],\n \"dest\" => [\n \"index\" => \"dest\",\n \"routing\" => \"=cat\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"source\",\"query\":{\"match\":{\"company\":\"cat\"}}},\"dest\":{\"index\":\"dest\",\"routing\":\"=cat\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "By default if reindex sees a document with routing then the routing is preserved unless it's changed by the script. You can set `routing` on the `dest` request to change this behavior. 
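For reference, `dest.routing` accepts more than the literal `=cat` form shown here; per the reindex API reference (not restated in this diff), `keep` preserves the incoming routing (the default) and `discard` sets it to null:

```python
# The three dest.routing modes, per the reindex API reference.
client.reindex(
    source={"index": "source"},
    dest={"index": "dest", "routing": "keep"},     # default: preserve routing
)
client.reindex(
    source={"index": "source"},
    dest={"index": "dest", "routing": "discard"},  # drop routing (set to null)
)
client.reindex(
    source={"index": "source"},
    dest={"index": "dest", "routing": "=cat"},     # literal value after "="
)
```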
In this example, run `POST _reindex` to copy all documents from the `source` with the company name `cat` into the `dest` with routing set to `cat`.\n", + "method_request": "POST _reindex", + "summary": "Routing", + "value": "{\n \"source\": {\n \"index\": \"source\",\n \"query\": {\n \"match\": {\n \"company\": \"cat\"\n }\n }\n },\n \"dest\": {\n \"index\": \"dest\",\n \"routing\": \"=cat\"\n }\n}" + }, + "ReindexRequestExample5": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"source\"\n },\n dest={\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"source\",\n },\n dest: {\n index: \"dest\",\n pipeline: \"some_ingest_pipeline\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"source\",\n ],\n \"dest\" => [\n \"index\" => \"dest\",\n \"pipeline\" => \"some_ingest_pipeline\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"source\"},\"dest\":{\"index\":\"dest\",\"pipeline\":\"some_ingest_pipeline\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "Run `POST _reindex` and use the ingest pipelines feature.", + "method_request": "POST _reindex", + "summary": "Ingest pipelines", + "value": "{\n \"source\": {\n \"index\": \"source\"\n },\n \"dest\": {\n \"index\": \"dest\",\n \"pipeline\": \"some_ingest_pipeline\"\n }\n}" + }, + "ReindexRequestExample6": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"my-index-000001\",\n query: {\n term: {\n \"user.id\": \"kimchy\",\n },\n },\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"my-index-000001\",\n \"query\" => [\n \"term\" => [\n \"user.id\" => \"kimchy\",\n ],\n ],\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"my-index-000001\",\"query\":{\"term\":{\"user.id\":\"kimchy\"}}},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "Run `POST _reindex` and add a query to the `source` to limit the documents to reindex. 
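As an aside on the ingest-pipeline example above: it assumes `some_ingest_pipeline` already exists. A minimal sketch of creating it first with the same Python client (the `set` processor here is purely illustrative):

```python
# Hypothetical pipeline definition for the reindex pipeline example above.
client.ingest.put_pipeline(
    id="some_ingest_pipeline",
    description="Tag documents copied by reindex",
    processors=[
        {"set": {"field": "copied_by", "value": "reindex"}},
    ],
)
```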
For example, this request copies documents into `my-new-index-000001` only if they have a `user.id` of `kimchy`.\n", + "method_request": "POST _reindex", + "summary": "Reindex with a query", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"query\": {\n \"term\": {\n \"user.id\": \"kimchy\"\n }\n }\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample7": { + "alternatives": [ + { + "code": "resp = client.reindex(\n max_docs=1,\n source={\n \"index\": \"my-index-000001\"\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n max_docs: 1,\n source: {\n index: \"my-index-000001\",\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"max_docs\" => 1,\n \"source\" => [\n \"index\" => \"my-index-000001\",\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"max_docs\":1,\"source\":{\"index\":\"my-index-000001\"},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "You can limit the number of processed documents by setting `max_docs`. For example, run `POST _reindex` to copy a single document from `my-index-000001` to `my-new-index-000001`.\n", + "method_request": "POST _reindex", + "summary": "Reindex with max_docs", + "value": "{\n \"max_docs\": 1,\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample8": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"my-index-000001\",\n \"_source\": [\n \"user.id\",\n \"_doc\"\n ]\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"my-index-000001\",\n _source: [\"user.id\", \"_doc\"],\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\n \"user.id\",\n \"_doc\"\n ]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"my-index-000001\",\n \"_source\" => array(\n \"user.id\",\n \"_doc\",\n ),\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"my-index-000001\",\"_source\":[\"user.id\",\"_doc\"]},\"dest\":{\"index\":\"my-new-index-000001\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "You can use source filtering to reindex a subset of the fields in the original documents. 
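Whichever of these variants runs, a synchronous reindex returns the same bookkeeping fields. A short sketch of the ones worth checking afterwards (field names per the reindex API reference; the values are illustrative):

```python
resp = client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001"},
)
print(resp["took"])                       # total milliseconds spent
print(resp["created"], resp["updated"])   # docs created vs. overwritten
print(resp["version_conflicts"])          # conflicts encountered, if any
assert not resp["failures"], resp["failures"]  # bulk failures abort the request
```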
For example, run `POST _reindex` to reindex only the `user.id` and `_doc` fields of each document.\n", + "method_request": "POST _reindex", + "summary": "Reindex selected fields", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\",\n \"_source\": [\"user.id\", \"_doc\"]\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n }\n}" + }, + "ReindexRequestExample9": { + "alternatives": [ + { + "code": "resp = client.reindex(\n source={\n \"index\": \"my-index-000001\"\n },\n dest={\n \"index\": \"my-new-index-000001\"\n },\n script={\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.reindex({\n source: {\n index: \"my-index-000001\",\n },\n dest: {\n index: \"my-new-index-000001\",\n },\n script: {\n source: 'ctx._source.tag = ctx._source.remove(\"flag\")',\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.reindex(\n body: {\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->reindex([\n \"body\" => [\n \"source\" => [\n \"index\" => \"my-index-000001\",\n ],\n \"dest\" => [\n \"index\" => \"my-new-index-000001\",\n ],\n \"script\" => [\n \"source\" => \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"source\":{\"index\":\"my-index-000001\"},\"dest\":{\"index\":\"my-new-index-000001\"},\"script\":{\"source\":\"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"}}' \"$ELASTICSEARCH_URL/_reindex\"", + "language": "curl" + } + ], + "description": "A reindex operation can build a copy of an index with renamed fields. 
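The script in this example moves the value of `flag` into `tag` via `ctx._source.remove`. Once the reindex finishes, a spot check on a known document confirms the rename, assuming the same Python client (the document ID is a placeholder):

```python
new_doc = client.get(index="my-new-index-000001", id="1")
# The copied source should now carry "tag" and no longer contain "flag".
assert "tag" in new_doc["_source"]
assert "flag" not in new_doc["_source"]
```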
If your index has documents with `text` and `flag` fields, you can change the latter field name to `tag` during the reindex.\n", + "method_request": "POST _reindex", + "summary": "Reindex new field names", + "value": "{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"ctx._source.tag = ctx._source.remove(\\\"flag\\\")\"\n }\n}" } }, "inherits": { @@ -47166,7 +47508,7 @@ } ] }, - "description": "Update a document.\n\nUpdate a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).", + "description": "Update a document.\n\nUpdate a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\nFor usage examples such as partial updates, upserts, and scripted updates, see the External documentation.", "examples": { "UpdateRequestExample1": { "alternatives": [ @@ -47737,7 +48079,7 @@ } } ], - "specLocation": "_global/update/UpdateRequest.ts#L38-L194" + "specLocation": "_global/update/UpdateRequest.ts#L38-L196" }, { "kind": "response", @@ -47893,7 +48235,7 @@ } ] }, - "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it 
begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. 
All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", + "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 
0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Refreshing shards**\n\nSpecifying the `refresh` parameter refreshes all shards once the request completes.\nThis is different to the update API's `refresh` parameter, which causes only the shard\nthat received the request to be refreshed. Unlike the update API, it does not support\n`wait_for`.\n\n**Running update by query asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch\nperforms some preflight checks, launches the request, and returns a\n[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.\n\n**Waiting for active shards**\n\n`wait_for_active_shards` controls how many copies of a shard must be active\nbefore proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)\nfor details. `timeout` controls how long each write request waits for unavailable\nshards to become available. Both work exactly the way they work in the\n[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also\nspecify the `scroll` parameter to control how long it keeps the search context\nalive, for example `?scroll=10m`. 
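Putting those pieces together: a sketch that launches the request asynchronously, widens the scroll keep-alive, and later polls the task, assuming the Python client used elsewhere in these examples:

```python
# Launch asynchronously; the response carries a task id instead of results.
resp = client.update_by_query(
    index="my-index-000001",
    conflicts="proceed",        # count version conflicts instead of failing
    scroll="10m",               # keep the search context alive longer
    wait_for_completion=False,
)
task_id = resp["task"]

# Poll (or cancel) the task later via the tasks API.
status = client.tasks.get(task_id=task_id)
if not status["completed"]:
    print("still running:", status["task"]["status"])
```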
The default is 5 minutes.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\nRefer to the linked documentation for examples of how to update documents using the `_update_by_query` API:", "examples": { "UpdateByQueryRequestExample1": { "alternatives": [ @@ -48444,7 +48786,7 @@ } } ], - "specLocation": "_global/update_by_query/UpdateByQueryRequest.ts#L37-L339" + "specLocation": "_global/update_by_query/UpdateByQueryRequest.ts#L37-L349" }, { "kind": "response", @@ -99959,6 +100301,584 @@ ] } }, + { + "kind": "enum", + "isOpen": true, + "members": [ + { + "aliases": [ + "cs", + "completionSize" + ], + "description": "Size of completion. For example: `0b`.", + "name": "completion.size" + }, + { + "description": "Disk space used by the shard’s dataset, which may or may not be the size on\ndisk, but includes space used by the shard on object storage. Reported as a size value for example: `5kb`.", + "name": "dataset.size" + }, + { + "aliases": [ + "dvc", + "denseVectorCount" + ], + "description": "Number of indexed dense vectors.", + "name": "dense_vector.value_count" + }, + { + "aliases": [ + "d", + "dc" + ], + "description": "Number of documents in shard, for example: `25`.", + "name": "docs" + }, + { + "aliases": [ + "fe", + "fielddataEvictions" + ], + "description": "Fielddata cache evictions, for example: `0`.", + "name": "fielddata.evictions" + }, + { + "aliases": [ + "fm", + "fielddataMemory" + ], + "description": "Used fielddata cache memory, for example: `0b`.", + "name": "fielddata.memory_size" + }, + { + "aliases": [ + "ft", + "flushTotal" + ], + "description": "Number of flushes, for example: `1`.", + "name": "flush.total" + }, + { + "aliases": [ + "ftt", + "flushTotalTime" + ], + "description": "Time spent in flush, for example: `1`.", + "name": "flush.total_time" + }, + { + "aliases": [ + "gc", + "getCurrent" + ], + "description": "Number of current get operations, for example: `0`.", + "name": "get.current" + }, + { + "aliases": [ + "geti", + "getExistsTime" + ], + "description": "Time spent in successful gets, for example: `14ms`.", + "name": "get.exists_time" + }, + { + "aliases": [ + "geto", + "getExistsTotal" + ], + "description": "Number of successful get operations, for example: `2`.", + "name": "get.exists_total" + }, + { + "aliases": [ + "gmti", + "getMissingTime" + ], + "description": "Time spent in failed gets, for example: `0s`.", + "name": "get.missing_time" + }, + { + "aliases": [ + "gmto", + "getMissingTotal" + ], + "description": "Number of failed get operations, for example: `1`.", + "name": "get.missing_total" + }, + { + "aliases": [ + "gti", + "getTime" + ], + "description": "Time spent in get, for example: `14ms`.", + "name": "get.time" + }, + { + "aliases": [ + "gto", + "getTotal" + ], + "description": "Number of get operations, for example: `2`.", + "name": "get.total" + }, + { + "description": "ID of the node, for example: `k0zy`.", + "name": "id" + }, + { + "aliases": [ + "i", + "idx" + ], + "description": "Name of the index.", + "name": "index" + }, + { + "aliases": [ + "idc", + "indexingDeleteCurrent" + ], + "description": "Number of current deletion operations, for example: `0`.", + "name": "indexing.delete_current" + }, + { + "aliases": [ + "idti", + "indexingDeleteTime" + ], + "description": "Time spent in 
deletions, for example: `2ms`.", + "name": "indexing.delete_time" + }, + { + "aliases": [ + "idto", + "indexingDeleteTotal" + ], + "description": "Number of deletion operations, for example: `2`.", + "name": "indexing.delete_total" + }, + { + "aliases": [ + "iic", + "indexingIndexCurrent" + ], + "description": "Number of current indexing operations, for example: `0`.", + "name": "indexing.index_current" + }, + { + "aliases": [ + "iifvc", + "indexingIndexFailedDueToVersionConflict" + ], + "description": "Number of failed indexing operations due to version conflict, for example: `0`.", + "name": "indexing.index_failed_due_to_version_conflict" + }, + { + "aliases": [ + "iif", + "indexingIndexFailed" + ], + "description": "Number of failed indexing operations, for example: `0`.", + "name": "indexing.index_failed" + }, + { + "aliases": [ + "iiti", + "indexingIndexTime" + ], + "description": "Time spent in indexing, for example: `134ms`.", + "name": "indexing.index_time" + }, + { + "aliases": [ + "iito", + "indexingIndexTotal" + ], + "description": "Number of indexing operations, for example: `1`.", + "name": "indexing.index_total" + }, + { + "description": "IP address of the node, for example: `127.0.1.1`.", + "name": "ip" + }, + { + "aliases": [ + "mc", + "mergesCurrent" + ], + "description": "Number of current merge operations, for example: `0`.", + "name": "merges.current" + }, + { + "aliases": [ + "mcd", + "mergesCurrentDocs" + ], + "description": "Number of current merging documents, for example: `0`.", + "name": "merges.current_docs" + }, + { + "aliases": [ + "mcs", + "mergesCurrentSize" + ], + "description": "Size of current merges, for example: `0b`.", + "name": "merges.current_size" + }, + { + "aliases": [ + "mt", + "mergesTotal" + ], + "description": "Number of completed merge operations, for example: `0`.", + "name": "merges.total" + }, + { + "aliases": [ + "mtd", + "mergesTotalDocs" + ], + "description": "Number of merged documents, for example: `0`.", + "name": "merges.total_docs" + }, + { + "aliases": [ + "mts", + "mergesTotalSize" + ], + "description": "Total size of completed merges, for example: `0b`.", + "name": "merges.total_size" + }, + { + "aliases": [ + "mtt", + "mergesTotalTime" + ], + "description": "Time spent merging documents, for example: `0s`.", + "name": "merges.total_time" + }, + { + "aliases": [ + "n" + ], + "description": "Node name, for example: `I8hydUG`.", + "name": "node" + }, + { + "aliases": [ + "p", + "pr", + "primaryOrReplica" + ], + "description": "Shard type.
Returned values are `primary` or `replica`.", + "name": "prirep" + }, + { + "aliases": [ + "qce", + "queryCacheEvictions" + ], + "description": "Query cache evictions, for example: `0`.", + "name": "query_cache.evictions" + }, + { + "aliases": [ + "qcm", + "queryCacheMemory" + ], + "description": "Used query cache memory, for example: `0b`.", + "name": "query_cache.memory_size" + }, + { + "aliases": [ + "rs" + ], + "description": "Type of recovery source.", + "name": "recoverysource.type" + }, + { + "aliases": [ + "rti", + "refreshTime" + ], + "description": "Time spent in refreshes, for example: `91ms`.", + "name": "refresh.time" + }, + { + "aliases": [ + "rto", + "refreshTotal" + ], + "description": "Number of refreshes, for example: `16`.", + "name": "refresh.total" + }, + { + "aliases": [ + "sfc", + "searchFetchCurrent" + ], + "description": "Current fetch phase operations, for example: `0`.", + "name": "search.fetch_current" + }, + { + "aliases": [ + "sfti", + "searchFetchTime" + ], + "description": "Time spent in fetch phase, for example: `37ms`.", + "name": "search.fetch_time" + }, + { + "aliases": [ + "sfto", + "searchFetchTotal" + ], + "description": "Number of fetch operations, for example: `7`.", + "name": "search.fetch_total" + }, + { + "aliases": [ + "so", + "searchOpenContexts" + ], + "description": "Open search contexts, for example: `0`.", + "name": "search.open_contexts" + }, + { + "aliases": [ + "sqc", + "searchQueryCurrent" + ], + "description": "Current query phase operations, for example: `0`.", + "name": "search.query_current" + }, + { + "aliases": [ + "sqti", + "searchQueryTime" + ], + "description": "Time spent in query phase, for example: `43ms`.", + "name": "search.query_time" + }, + { + "aliases": [ + "sqto", + "searchQueryTotal" + ], + "description": "Number of query operations, for example: `9`.", + "name": "search.query_total" + }, + { + "aliases": [ + "scc", + "searchScrollCurrent" + ], + "description": "Open scroll contexts, for example: `2`.", + "name": "search.scroll_current" + }, + { + "aliases": [ + "scti", + "searchScrollTime" + ], + "description": "Time scroll contexts held open, for example: `2m`.", + "name": "search.scroll_time" + }, + { + "aliases": [ + "scto", + "searchScrollTotal" + ], + "description": "Completed scroll contexts, for example: `1`.", + "name": "search.scroll_total" + }, + { + "aliases": [ + "sc", + "segmentsCount" + ], + "description": "Number of segments, for example: `4`.", + "name": "segments.count" + }, + { + "aliases": [ + "sfbm", + "fixedBitsetMemory" + ], + "description": "Memory used by fixed bit sets for nested object field types and type filters for types referred in join fields, for example: `1.0kb`.", + "name": "segments.fixed_bitset_memory" + }, + { + "aliases": [ + "siwm", + "segmentsIndexWriterMemory" + ], + "description": "Memory used by index writer, for example: `18mb`.", + "name": "segments.index_writer_memory" + }, + { + "aliases": [ + "sm", + "segmentsMemory" + ], + "description": "Memory used by segments, for example: `1.4kb`.", + "name": "segments.memory" + }, + { + "aliases": [ + "svmm", + "segmentsVersionMapMemory" + ], + "description": "Memory used by version map, for example: `1.0kb`.", + "name": "segments.version_map_memory" + }, + { + "aliases": [ + "sqg", + "globalCheckpoint" + ], + "description": "Global checkpoint.", + "name": "seq_no.global_checkpoint" + }, + { + "aliases": [ + "sql", + "localCheckpoint" + ], + "description": "Local checkpoint.", + "name": "seq_no.local_checkpoint" + }, + { + 
"aliases": [ + "sqm", + "maxSeqNo" + ], + "description": "Maximum sequence number.", + "name": "seq_no.max" + }, + { + "aliases": [ + "s", + "sh" + ], + "description": "Name of the shard.", + "name": "shard" + }, + { + "aliases": [ + "svc", + "sparseVectorCount" + ], + "description": "Number of indexed [sparse vectors](https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/sparse-vector).", + "name": "dsparse_vector.value_count" + }, + { + "aliases": [ + "st" + ], + "description": "State of the shard. Returned values are:\n* `INITIALIZING`: The shard is recovering from a peer shard or gateway.\n* `RELOCATING`: The shard is relocating.\n* `STARTED`: The shard has started.\n* `UNASSIGNED`: The shard is not assigned to any node.", + "name": "state" + }, + { + "aliases": [ + "sto" + ], + "description": "Disk space used by the shard, for example: `5kb`.", + "name": "store" + }, + { + "aliases": [ + "suc", + "suggestCurrent" + ], + "description": "Number of current suggest operations, for example: `0`.", + "name": "suggest.current" + }, + { + "aliases": [ + "suti", + "suggestTime" + ], + "description": "Time spent in suggest, for example: `0`.", + "name": "suggest.time" + }, + { + "aliases": [ + "suto", + "suggestTotal" + ], + "description": "Number of suggest operations, for example: `0`.", + "name": "suggest.total" + }, + { + "description": "Sync ID of the shard.", + "name": "sync_id" + }, + { + "aliases": [ + "ua" + ], + "description": "Time at which the shard became unassigned in [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/List_of_UTC_offsets).", + "name": "unassigned.at" + }, + { + "aliases": [ + "ud" + ], + "description": "Details about why the shard became unassigned. This does not explain why the shard is currently unassigned. To understand why a shard\nis not assigned, use the [Cluster allocation explain](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) API.", + "name": "unassigned.details" + }, + { + "aliases": [ + "uf" + ], + "description": "Time at which the shard was requested to be unassigned in [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/List_of_UTC_offsets).", + "name": "unassigned.for" + }, + { + "aliases": [ + "ur" + ], + "description": "Indicates the reason for the last change to the state of this unassigned shard. This does not explain why the shard is currently unassigned.\nTo understand why a shard is not assigned, use the [Cluster allocation explain](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) API. 
Returned values include:\n\n* `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard.\n* `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery.\n* `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index.\n* `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index.\n* `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the Cluster reroute API.\n* `INDEX_CLOSED`: Unassigned because the index was closed.\n* `INDEX_CREATED`: Unassigned as a result of an API creation of an index.\n* `INDEX_REOPENED`: Unassigned as a result of opening a closed index.\n* `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the Cluster reroute API.\n* `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index.\n* `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster.\n* `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the Node shutdown API.\n* `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed.\n* `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled.\n* `REINITIALIZED`: When a shard moves from started back to initializing.\n* `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica.\n* `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command.", + "name": "unassigned.reason" + } + ], + "name": { + "name": "CatShardColumn", + "namespace": "cat._types" + }, + "specLocation": "cat/_types/CatBase.ts#L1576-L1949" + }, + { + "kind": "type_alias", + "name": { + "name": "CatShardColumns", + "namespace": "cat._types" + }, + "specLocation": "cat/_types/CatBase.ts#L1950-L1950", + "type": { + "kind": "union_of", + "items": [ + { + "kind": "instance_of", + "type": { + "name": "CatShardColumn", + "namespace": "cat._types" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CatShardColumn", + "namespace": "cat._types" + } + } + } + ] + } + }, { "kind": "enum", "isOpen": true, @@ -100096,6 +101016,181 @@ ] } }, + { + "kind": "enum", + "isOpen": true, + "members": [ + { + "aliases": [ + "a" + ], + "description": "Number of active threads in the current thread pool.", + "name": "active" + }, + { + "aliases": [ + "c" + ], + "description": "Number of tasks completed by the thread pool executor.", + "name": "completed" + }, + { + "aliases": [ + "cr" + ], + "description": "Configured core number of active threads allowed in the current thread pool.", + "name": "core" + }, + { + "aliases": [ + "eid" + ], + "description": "Ephemeral node ID.", + "name": "ephemeral_id" + }, + { + "aliases": [ + "h" + ], + "description": "Hostname for the current node.", + "name": "host" + }, + { + "aliases": [ + "i" + ], + "description": "IP address for the current node.", + "name": "ip" + }, + { + "aliases": [ + "k" + ], + "description": "Configured keep alive time for threads.", + "name": "keep_alive" + }, + { + "aliases": [ + "l" + ], + "description": "Highest number of active threads in the current thread pool.", + "name": "largest" + }, + { + "aliases": [ + "mx" + ], + "description": "Configured maximum number of active threads allowed in the current thread pool.", + "name": "max" + }, + { + "description": "Name of the thread pool, such as `analyze` or `generic`.", + "name": "name" + }, + { + 
"aliases": [ + "id" + ], + "description": "ID of the node, such as `k0zy`.", + "name": "node_id" + }, + { + "description": "Node name, such as `I8hydUG`.", + "name": "node_name" + }, + { + "aliases": [ + "p" + ], + "description": "Process ID of the running node.", + "name": "pid" + }, + { + "aliases": [ + "psz" + ], + "description": "Number of threads in the current thread pool.", + "name": "pool_size" + }, + { + "aliases": [ + "po" + ], + "description": "Bound transport port for the current node.", + "name": "port" + }, + { + "aliases": [ + "q" + ], + "description": "Number of tasks in the queue for the current thread pool.", + "name": "queue" + }, + { + "aliases": [ + "qs" + ], + "description": "Maximum number of tasks permitted in the queue for the current thread pool.", + "name": "queue_size" + }, + { + "aliases": [ + "r" + ], + "description": "Number of tasks rejected by the thread pool executor.", + "name": "rejected" + }, + { + "aliases": [ + "sz" + ], + "description": "Configured fixed number of active threads allowed in the current thread pool.", + "name": "size" + }, + { + "aliases": [ + "t" + ], + "description": "Type of thread pool. Returned values are `fixed`, `fixed_auto_queue_size`, `direct`, or `scaling`.", + "name": "type" + } + ], + "name": { + "name": "CatThreadPoolColumn", + "namespace": "cat._types" + }, + "specLocation": "cat/_types/CatBase.ts#L1952-L2052" + }, + { + "kind": "type_alias", + "name": { + "name": "CatThreadPoolColumns", + "namespace": "cat._types" + }, + "specLocation": "cat/_types/CatBase.ts#L2053-L2053", + "type": { + "kind": "union_of", + "items": [ + { + "kind": "instance_of", + "type": { + "name": "CatThreadPoolColumn", + "namespace": "cat._types" + } + }, + { + "kind": "array_of", + "value": { + "kind": "instance_of", + "type": { + "name": "CatThreadPoolColumn", + "namespace": "cat._types" + } + } + } + ] + } + }, { "kind": "enum", "members": [ @@ -110931,13 +112026,13 @@ "type": { "kind": "instance_of", "type": { - "name": "Names", - "namespace": "_types" + "name": "CatShardColumns", + "namespace": "cat._types" } } }, { - "description": "List of columns that determine how the table should be sorted.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", + "description": "A comma-separated list of column names or aliases that determines the sort order.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", "name": "s", "required": false, "type": { @@ -110949,7 +112044,7 @@ } }, { - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -110962,7 +112057,7 @@ } }, { - "description": "Unit used to display time values.", + "description": "The unit used to display time values.", "name": "time", "required": false, "type": { @@ -113477,13 +114572,13 @@ "type": { "kind": "instance_of", "type": { - "name": "Names", - "namespace": "_types" + "name": "CatThreadPoolColumns", + "namespace": "cat._types" } } }, { - "description": "List of columns that determine how the table should be sorted.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a suffix to the column name.", + "description": "A comma-separated list of column names or aliases that determines the sort order.\nSorting defaults to ascending and can be changed by setting `:asc`\nor `:desc` as a 
suffix to the column name.", "name": "s", "required": false, "type": { @@ -113520,7 +114615,7 @@ } }, { - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -220175,7 +221270,7 @@ "description": "", "version": "8.11.0" }, - "description": "Search rolled-up data.\nThe rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\n**Searching both historical rollup and non-rollup data**\n\nThe rollup search API has the capability to search across both \"live\" non-rollup data and the aggregated rollup data.\nThis is done by simply adding the live indices to the URI. For example:\n\n```\nGET sensor-1,sensor_rollup/_rollup_search\n{\n \"size\": 0,\n \"aggregations\": {\n \"max_temperature\": {\n \"max\": {\n \"field\": \"temperature\"\n }\n }\n }\n}\n```\n\nThe rollup search endpoint does two things when the search runs:\n\n* The original request is sent to the non-rollup index unaltered.\n* A rewritten version of the original request is sent to the rollup index.\n\nWhen the two responses are received, the endpoint rewrites the rollup response and merges the two together.\nDuring the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.", + "description": "Search rolled-up data.\nThe rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.\nIt rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.\n\nThe request body supports a subset of features from the regular search API.\nThe following functionality is not available:\n\n`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.\n`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed.\n\nFor more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.", "examples": { "RollupSearchRequestExample1": { "alternatives": [ @@ -220259,7 +221354,7 @@ } } ], - "specLocation": "rollup/rollup_search/RollupSearchRequest.ts#L27-L109" + "specLocation": "rollup/rollup_search/RollupSearchRequest.ts#L27-L86" }, { "kind": "response", @@ -236107,6 +237202,70 @@ "method_request": "GET /_security/_query/api_key?with_limited_by=true", "summary": "Query API keys by ID", "value": "{\n \"query\": {\n \"ids\": {\n \"values\": [\n \"VuaCfGcBCdbkQm-e5aOx\"\n ]\n }\n }\n}" + }, + "QueryApiKeysRequestExample2": { + "alternatives": [ + { + "code": "resp = client.security.query_api_keys(\n query={\n 
\"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\"\n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\"\n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\"\n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\"\n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\"\n }\n }\n ]\n }\n },\n from=20,\n size=10,\n sort=[\n {\n \"creation\": {\n \"order\": \"desc\",\n \"format\": \"date_time\"\n }\n },\n \"name\"\n ],\n)", + "language": "Python" + }, + { + "code": "const response = await client.security.queryApiKeys({\n query: {\n bool: {\n must: [\n {\n prefix: {\n name: \"app1-key-\",\n },\n },\n {\n term: {\n invalidated: \"false\",\n },\n },\n ],\n must_not: [\n {\n term: {\n name: \"app1-key-01\",\n },\n },\n ],\n filter: [\n {\n wildcard: {\n username: \"org-*-user\",\n },\n },\n {\n term: {\n \"metadata.environment\": \"production\",\n },\n },\n ],\n },\n },\n from: 20,\n size: 10,\n sort: [\n {\n creation: {\n order: \"desc\",\n format: \"date_time\",\n },\n },\n \"name\",\n ],\n});", + "language": "JavaScript" + }, + { + "code": "response = client.security.query_api_keys(\n body: {\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\"\n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\"\n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\"\n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\"\n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\"\n }\n }\n ]\n }\n },\n \"from\": 20,\n \"size\": 10,\n \"sort\": [\n {\n \"creation\": {\n \"order\": \"desc\",\n \"format\": \"date_time\"\n }\n },\n \"name\"\n ]\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->security()->queryApiKeys([\n \"body\" => [\n \"query\" => [\n \"bool\" => [\n \"must\" => array(\n [\n \"prefix\" => [\n \"name\" => \"app1-key-\",\n ],\n ],\n [\n \"term\" => [\n \"invalidated\" => \"false\",\n ],\n ],\n ),\n \"must_not\" => array(\n [\n \"term\" => [\n \"name\" => \"app1-key-01\",\n ],\n ],\n ),\n \"filter\" => array(\n [\n \"wildcard\" => [\n \"username\" => \"org-*-user\",\n ],\n ],\n [\n \"term\" => [\n \"metadata.environment\" => \"production\",\n ],\n ],\n ),\n ],\n ],\n \"from\" => 20,\n \"size\" => 10,\n \"sort\" => array(\n [\n \"creation\" => [\n \"order\" => \"desc\",\n \"format\" => \"date_time\",\n ],\n ],\n \"name\",\n ),\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X GET -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"query\":{\"bool\":{\"must\":[{\"prefix\":{\"name\":\"app1-key-\"}},{\"term\":{\"invalidated\":\"false\"}}],\"must_not\":[{\"term\":{\"name\":\"app1-key-01\"}}],\"filter\":[{\"wildcard\":{\"username\":\"org-*-user\"}},{\"term\":{\"metadata.environment\":\"production\"}}]}},\"from\":20,\"size\":10,\"sort\":[{\"creation\":{\"order\":\"desc\",\"format\":\"date_time\"}},\"name\"]}' \"$ELASTICSEARCH_URL/_security/_query/api_key\"", + "language": "curl" + }, + { + "code": "client.security().queryApiKeys(q -> q\n .from(20)\n .query(qu -> qu\n .bool(b -> b\n .filter(List.of(Query.of(que -> que\n .wildcard(w -> w\n .field(\"username\")\n .value(\"org-*-user\")\n )),Query.of(quer -> quer\n .term(t -> t\n .field(\"metadata.environment\")\n .value(FieldValue.of(\"production\"))\n ))))\n .must(List.of(Query.of(query -> query\n .prefix(p -> p\n .field(\"name\")\n .value(\"app1-key-\")\n )),Query.of(query1 -> query1\n .term(t -> t\n 
.field(\"invalidated\")\n .value(FieldValue.of(\"false\"))\n ))))\n .mustNot(m -> m\n .term(t -> t\n .field(\"name\")\n .value(FieldValue.of(\"app1-key-01\"))\n )\n )\n )\n )\n .size(10)\n .sort(List.of(SortOptions.of(s -> s\n .field(f -> f\n .field(\"creation\")\n .order(SortOrder.Desc)\n .format(\"date_time\")\n )),SortOptions.of(so -> so\n .field(f -> f\n .field(\"name\")\n ))))\n);\n", + "language": "Java" + } + ], + "description": "Run `GET /_security/_query/api_key`. Use a `bool` query to issue complex logical conditions and use `from`, `size`, and `sort` to help paginate the result. For example, the API key name must begin with `app1-key-` and must not be `app1-key-01`. It must be owned by a username with the wildcard pattern `org-*-user` and the `environment` metadata field must have a `production` value. The offset to begin the search result is the twentieth (zero-based index) API key. The page size of the response is 10 API keys. The result is first sorted by creation date in descending order, then by name in ascending order.\n", + "method_request": "GET /_security/_query/api_key", + "summary": "Query API keys with pagination", + "value": "{\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"prefix\": {\n \"name\": \"app1-key-\" \n }\n },\n {\n \"term\": {\n \"invalidated\": \"false\" \n }\n }\n ],\n \"must_not\": [\n {\n \"term\": {\n \"name\": \"app1-key-01\" \n }\n }\n ],\n \"filter\": [\n {\n \"wildcard\": {\n \"username\": \"org-*-user\" \n }\n },\n {\n \"term\": {\n \"metadata.environment\": \"production\" \n }\n }\n ]\n }\n },\n \"from\": 20, \n \"size\": 10, \n \"sort\": [ \n { \"creation\": { \"order\": \"desc\", \"format\": \"date_time\" } },\n \"name\"\n ]\n}" + }, + "QueryApiKeysRequestExample3": { + "alternatives": [ + { + "code": "resp = client.security.query_api_keys(\n query={\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.security.queryApiKeys({\n query: {\n term: {\n name: {\n value: \"application-key-1\",\n },\n },\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.security.query_api_keys(\n body: {\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->security()->queryApiKeys([\n \"body\" => [\n \"query\" => [\n \"term\" => [\n \"name\" => [\n \"value\" => \"application-key-1\",\n ],\n ],\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X GET -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"query\":{\"term\":{\"name\":{\"value\":\"application-key-1\"}}}}' \"$ELASTICSEARCH_URL/_security/_query/api_key\"", + "language": "curl" + }, + { + "code": "client.security().queryApiKeys(q -> q\n .query(qu -> qu\n .term(t -> t\n .field(\"name\")\n .value(FieldValue.of(\"application-key-1\"))\n )\n )\n);\n", + "language": "Java" + } + ], + "description": "Run `GET /_security/_query/api_key` to retrieve the API key by name.", + "method_request": "GET /_security/_query/api_key", + "summary": "Query API keys by name", + "value": "{\n \"query\": {\n \"term\": {\n \"name\": {\n \"value\": \"application-key-1\"\n }\n }\n }\n}" } }, "inherits": { @@ -236249,6 +237408,23 @@ } ] }, + "examples": { + "QueryApiKeysResponseExample1": { + "description": "A successful response from `GET /_security/_query/api_key?with_limited_by=true`. 
The `limited_by` details are the owner user's permissions associated with the API key. It is a point-in-time snapshot captured at creation and subsequent updates. An API key's effective permissions are an intersection of its assigned privileges and the owner user's permissions.\n", + "summary": "Query API keys by ID", + "value": "{\n \"api_keys\": [\n {\n \"id\": \"VuaCfGcBCdbkQm-e5aOx\",\n \"name\": \"application-key-1\",\n \"creation\": 1548550550158,\n \"expiration\": 1548551550158,\n \"invalidated\": false,\n \"username\": \"myuser\",\n \"realm\": \"native1\",\n \"realm_type\": \"native\",\n \"metadata\": {\n \"application\": \"my-application\"\n },\n \"role_descriptors\": { },\n \"limited_by\": [ \n {\n \"role-power-user\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"*\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample2": { + "description": "An abbreviated response from `GET /_security/_query/api_key` that contains a list of matched API keys along with their sort values. The first sort value is creation time, which is displayed in `date_time` format. The second sort value is the API key name.\n", + "summary": "Query API keys with pagination", + "value": "{\n \"total\": 100,\n \"count\": 10,\n \"api_keys\": [\n {\n \"id\": \"CLXgVnsBOGkf8IyjcXU7\",\n \"name\": \"app1-key-79\",\n \"creation\": 1629250154811,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:14.811Z\", \n \"app1-key-79\" \n ]\n },\n {\n \"id\": \"BrXgVnsBOGkf8IyjbXVB\",\n \"name\": \"app1-key-78\",\n \"creation\": 1629250153794,\n \"invalidated\": false,\n \"username\": \"org-admin-user\",\n \"realm\": \"native1\",\n \"metadata\": {\n \"environment\": \"production\"\n },\n \"role_descriptors\": { },\n \"_sort\": [\n \"2021-08-18T01:29:13.794Z\",\n \"app1-key-78\"\n ]\n }\n ]\n}" + }, + "QueryApiKeysResponseExample3": { + "description": "A successful response from `GET /_security/_query/api_key`. It includes the role descriptors that are assigned to each API key when it was created or last updated. Note that an API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of the owner user's permissions. 
An empty role descriptors object means the API key inherits the owner user's permissions.\n", + "summary": "Query all API keys", + "value": "{\n \"total\": 3,\n \"count\": 3,\n \"api_keys\": [ \n {\n \"id\": \"nkvrGXsB8w290t56q3Rg\",\n \"name\": \"my-api-key-1\",\n \"creation\": 1628227480421,\n \"expiration\": 1629091480421,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"realm_type\": \"reserved\",\n \"metadata\": {\n \"letter\": \"a\"\n },\n \"role_descriptors\": { \n \"role-a\": {\n \"cluster\": [\n \"monitor\"\n ],\n \"indices\": [\n {\n \"names\": [\n \"index-a\"\n ],\n \"privileges\": [\n \"read\"\n ],\n \"allow_restricted_indices\": false\n }\n ],\n \"applications\": [ ],\n \"run_as\": [ ],\n \"metadata\": { },\n \"transient_metadata\": {\n \"enabled\": true\n }\n }\n }\n },\n {\n \"id\": \"oEvrGXsB8w290t5683TI\",\n \"name\": \"my-api-key-2\",\n \"creation\": 1628227498953,\n \"expiration\": 1628313898953,\n \"invalidated\": false,\n \"username\": \"elastic\",\n \"realm\": \"reserved\",\n \"metadata\": {\n \"letter\": \"b\"\n },\n \"role_descriptors\": { } \n }\n ]\n}" + } + }, "name": { "name": "Response", "namespace": "security.query_api_keys" @@ -249988,27 +251164,8 @@ } } ], - "query": [ - { - "availability": { - "stack": { - "since": "9.1.0" - } - }, - "description": "If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning.\nIf `false`, analyzers will not be reloaded with the new synonym set", - "name": "refresh", - "required": false, - "serverDefault": true, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } - } - ], - "specLocation": "synonyms/put_synonym/SynonymsPutRequest.ts#L23-L67" + "query": [], + "specLocation": "synonyms/put_synonym/SynonymsPutRequest.ts#L23-L58" }, { "kind": "response", @@ -261255,6 +262412,70 @@ "method_request": "POST _watcher/watch/my_watch/_execute", "summary": "Run a watch", "value": "{\n \"trigger_data\" : { \n \"triggered_time\" : \"now\",\n \"scheduled_time\" : \"now\"\n },\n \"alternative_input\" : { \n \"foo\" : \"bar\"\n },\n \"ignore_condition\" : true, \n \"action_modes\" : {\n \"my-action\" : \"force_simulate\" \n },\n \"record_execution\" : true \n}" + }, + "WatcherExecuteRequestExample2": { + "alternatives": [ + { + "code": "resp = client.watcher.execute_watch(\n id=\"my_watch\",\n action_modes={\n \"action1\": \"force_simulate\",\n \"action2\": \"skip\"\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.watcher.executeWatch({\n id: \"my_watch\",\n action_modes: {\n action1: \"force_simulate\",\n action2: \"skip\",\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.watcher.execute_watch(\n id: \"my_watch\",\n body: {\n \"action_modes\": {\n \"action1\": \"force_simulate\",\n \"action2\": \"skip\"\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->watcher()->executeWatch([\n \"id\" => \"my_watch\",\n \"body\" => [\n \"action_modes\" => [\n \"action1\" => \"force_simulate\",\n \"action2\" => \"skip\",\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"action_modes\":{\"action1\":\"force_simulate\",\"action2\":\"skip\"}}' \"$ELASTICSEARCH_URL/_watcher/watch/my_watch/_execute\"", + "language": "curl" + }, + { + "code": "client.watcher().executeWatch(e -> e\n 
.actionModes(Map.of(\"action1\", ActionExecutionMode.ForceSimulate,\"action2\", ActionExecutionMode.Skip))\n .id(\"my_watch\")\n);\n", + "language": "Java" + } + ], + "description": "Run `POST _watcher/watch/my_watch/_execute` and set a different mode for each action.\n", + "method_request": "POST _watcher/watch/my_watch/_execute", + "summary": "Run a watch with multiple action modes", + "value": "{\n \"action_modes\" : {\n \"action1\" : \"force_simulate\",\n \"action2\" : \"skip\"\n }\n}" + }, + "WatcherExecuteRequestExample3": { + "alternatives": [ + { + "code": "resp = client.watcher.execute_watch(\n watch={\n \"trigger\": {\n \"schedule\": {\n \"interval\": \"10s\"\n }\n },\n \"input\": {\n \"search\": {\n \"request\": {\n \"indices\": [\n \"logs\"\n ],\n \"body\": {\n \"query\": {\n \"match\": {\n \"message\": \"error\"\n }\n }\n }\n }\n }\n },\n \"condition\": {\n \"compare\": {\n \"ctx.payload.hits.total\": {\n \"gt\": 0\n }\n }\n },\n \"actions\": {\n \"log_error\": {\n \"logging\": {\n \"text\": \"Found {{ctx.payload.hits.total}} errors in the logs\"\n }\n }\n }\n },\n)", + "language": "Python" + }, + { + "code": "const response = await client.watcher.executeWatch({\n watch: {\n trigger: {\n schedule: {\n interval: \"10s\",\n },\n },\n input: {\n search: {\n request: {\n indices: [\"logs\"],\n body: {\n query: {\n match: {\n message: \"error\",\n },\n },\n },\n },\n },\n },\n condition: {\n compare: {\n \"ctx.payload.hits.total\": {\n gt: 0,\n },\n },\n },\n actions: {\n log_error: {\n logging: {\n text: \"Found {{ctx.payload.hits.total}} errors in the logs\",\n },\n },\n },\n },\n});", + "language": "JavaScript" + }, + { + "code": "response = client.watcher.execute_watch(\n body: {\n \"watch\": {\n \"trigger\": {\n \"schedule\": {\n \"interval\": \"10s\"\n }\n },\n \"input\": {\n \"search\": {\n \"request\": {\n \"indices\": [\n \"logs\"\n ],\n \"body\": {\n \"query\": {\n \"match\": {\n \"message\": \"error\"\n }\n }\n }\n }\n }\n },\n \"condition\": {\n \"compare\": {\n \"ctx.payload.hits.total\": {\n \"gt\": 0\n }\n }\n },\n \"actions\": {\n \"log_error\": {\n \"logging\": {\n \"text\": \"Found {{ctx.payload.hits.total}} errors in the logs\"\n }\n }\n }\n }\n }\n)", + "language": "Ruby" + }, + { + "code": "$resp = $client->watcher()->executeWatch([\n \"body\" => [\n \"watch\" => [\n \"trigger\" => [\n \"schedule\" => [\n \"interval\" => \"10s\",\n ],\n ],\n \"input\" => [\n \"search\" => [\n \"request\" => [\n \"indices\" => array(\n \"logs\",\n ),\n \"body\" => [\n \"query\" => [\n \"match\" => [\n \"message\" => \"error\",\n ],\n ],\n ],\n ],\n ],\n ],\n \"condition\" => [\n \"compare\" => [\n \"ctx.payload.hits.total\" => [\n \"gt\" => 0,\n ],\n ],\n ],\n \"actions\" => [\n \"log_error\" => [\n \"logging\" => [\n \"text\" => \"Found {{ctx.payload.hits.total}} errors in the logs\",\n ],\n ],\n ],\n ],\n ],\n]);", + "language": "PHP" + }, + { + "code": "curl -X POST -H \"Authorization: ApiKey $ELASTIC_API_KEY\" -H \"Content-Type: application/json\" -d '{\"watch\":{\"trigger\":{\"schedule\":{\"interval\":\"10s\"}},\"input\":{\"search\":{\"request\":{\"indices\":[\"logs\"],\"body\":{\"query\":{\"match\":{\"message\":\"error\"}}}}}},\"condition\":{\"compare\":{\"ctx.payload.hits.total\":{\"gt\":0}}},\"actions\":{\"log_error\":{\"logging\":{\"text\":\"Found {{ctx.payload.hits.total}} errors in the logs\"}}}}}' \"$ELASTICSEARCH_URL/_watcher/watch/_execute\"", + "language": "curl" + }, + { + "code": "client.watcher().executeWatch(e -> e\n .watch(w -> w\n .actions(\"log_error\", a 
-> a\n .logging(l -> l\n .text(\"Found {{ctx.payload.hits.total}} errors in the logs\")\n )\n )\n .condition(c -> c\n .compare(NamedValue.of(\"ctx.payload.hits.total\",Pair.of(ConditionOp.Gt,FieldValue.of(0))))\n )\n .input(i -> i\n .search(s -> s\n .request(r -> r\n .body(b -> b\n .query(q -> q\n .match(m -> m\n .field(\"message\")\n .query(FieldValue.of(\"error\"))\n )\n )\n )\n .indices(\"logs\")\n )\n )\n )\n .trigger(t -> t\n .schedule(sc -> sc\n .interval(in -> in\n .time(\"10s\")\n )\n )\n )\n )\n);\n", + "language": "Java" + } + ], + "description": "Run `POST _watcher/watch/_execute` to run a watch inline. All other settings for this API still apply when inlining a watch. In this example, while the inline watch defines a compare condition, during the execution this condition will be ignored.\n", + "method_request": "POST _watcher/watch/_execute", + "summary": "Run a watch inline", + "value": "{\n \"watch\" : {\n \"trigger\" : { \"schedule\" : { \"interval\" : \"10s\" } },\n \"input\" : {\n \"search\" : {\n \"request\" : {\n \"indices\" : [ \"logs\" ],\n \"body\" : {\n \"query\" : {\n \"match\" : { \"message\": \"error\" }\n }\n }\n }\n }\n },\n \"condition\" : {\n \"compare\" : { \"ctx.payload.hits.total\" : { \"gt\" : 0 }}\n },\n \"actions\" : {\n \"log_error\" : {\n \"logging\" : {\n \"text\" : \"Found {{ctx.payload.hits.total}} errors in the logs\"\n }\n }\n }\n }\n}" } }, "inherits": { diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 6e382645b8..f65c4e3623 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -7145,10 +7145,18 @@ export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[] +export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'node' | 'n' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'recoverysource.type' | 'rs' | 
'refresh.time' | 'rti' | 'refreshTime' | 'refresh.total' | 'rto' | 'refreshTotal' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'seq_no.global_checkpoint' | 'sqg' | 'globalCheckpoint' | 'seq_no.local_checkpoint' | 'sql' | 'localCheckpoint' | 'seq_no.max' | 'sqm' | 'maxSeqNo' | 'shard' | 's' | 'sh' | 'sparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'state' | 'st' | 'store' | 'sto' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'sync_id' | 'unassigned.at' | 'ua' | 'unassigned.details' | 'ud' | 'unassigned.for' | 'uf' | 'unassigned.reason' | 'ur'| string + +export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[] + export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r'| string export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[] +export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't'| string + +export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[] + export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' export type CatCatTrainedModelsColumns = CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] @@ -8530,7 +8538,7 @@ export interface CatSegmentsSegmentsRecord { export interface CatShardsRequest extends CatCatRequestBase { index?: Indices bytes?: Bytes - h?: Names + h?: CatCatShardColumns s?: Names master_timeout?: Duration time?: TimeUnit @@ -8873,7 +8881,7 @@ export interface CatTemplatesTemplatesRecord { export interface CatThreadPoolRequest extends CatCatRequestBase { thread_pool_patterns?: Names - h?: Names + h?: CatCatThreadPoolColumns s?:
Names time?: TimeUnit local?: boolean
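Taken together, the changes above replace the loose `Names` type for the cat shards and cat thread pool `h` parameters with dedicated column unions. A minimal sketch of what that buys a TypeScript caller, assuming the generated types flow into the JavaScript client unchanged (the node URL is a placeholder):

```typescript
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node URL

async function catSummaries() {
  // `h` now resolves to CatCatShardColumns, so editors can autocomplete
  // column names and aliases from the enum above. Note the trailing
  // `| string` in the union: arbitrary strings still type-check, so the
  // practical gain is discoverability rather than hard validation.
  const shards = await client.cat.shards({
    h: ['index', 'shard', 'prirep', 'state', 'docs', 'store'],
  })

  // Likewise for thread pools via CatCatThreadPoolColumns.
  const pools = await client.cat.threadPool({
    h: ['node_name', 'name', 'active', 'queue', 'rejected'],
  })

  console.log(shards, pools)
}
```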