Skip to content

Commit 422c1dd

Browse files
authored
Merge branch '8.19' into backport/8.x/pr-117595
2 parents 9ea1d19 + e2270e0 commit 422c1dd

File tree

245 files changed

+6770
-3139
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

245 files changed

+6770
-3139
lines changed

.backportrc.json

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,9 @@
11
{
22
"upstream" : "elastic/elasticsearch",
3-
"targetBranchChoices" : [ "main", "8.x", "8.18", "8.17", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ],
3+
"targetBranchChoices" : [ "main", "9.0", "8.19", "8.18", "8.17", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ],
44
"targetPRLabels" : [ "backport" ],
55
"branchLabelMapping" : {
6-
"^v9.0.0$" : "main",
7-
"^v8.19.0$" : "8.x",
6+
"^v9.1.0$" : "main",
87
"^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2"
98
}
109
}

.buildkite/pipelines/periodic.template.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ steps:
217217
image: family/elasticsearch-ubuntu-2004
218218
machineType: n2-standard-8
219219
buildDirectory: /dev/shm/bk
220-
if: build.branch == "main" || build.branch == "8.x" || build.branch == "7.17"
220+
if: build.branch == "main" || build.branch == "8.19" || build.branch == "7.17"
221221
- label: check-branch-consistency
222222
command: .ci/scripts/run-gradle.sh branchConsistency
223223
timeout_in_minutes: 15

.buildkite/pipelines/periodic.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -940,7 +940,7 @@ steps:
940940
image: family/elasticsearch-ubuntu-2004
941941
machineType: n2-standard-8
942942
buildDirectory: /dev/shm/bk
943-
if: build.branch == "main" || build.branch == "8.x" || build.branch == "7.17"
943+
if: build.branch == "main" || build.branch == "8.19" || build.branch == "7.17"
944944
- label: check-branch-consistency
945945
command: .ci/scripts/run-gradle.sh branchConsistency
946946
timeout_in_minutes: 15

.buildkite/scripts/lucene-snapshot/update-branch.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ fi
1010
if [[ "$BUILDKITE_BRANCH" == "lucene_snapshot_10" ]]; then
1111
UPSTREAM="main"
1212
elif [[ "$BUILDKITE_BRANCH" == "lucene_snapshot" ]]; then
13-
UPSTREAM="8.x"
13+
UPSTREAM="8.19"
1414
else
1515
echo "Error: unknown branch: $BUILDKITE_BRANCH"
1616
exit 1

.ci/scripts/resolve-dra-manifest.sh

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,6 @@ if [ "$LATEST_VERSION" != "$ES_VERSION" ]; then
2424
echo "Latest build for '$ARTIFACT' is version $LATEST_VERSION but expected version $ES_VERSION." 1>&2
2525
NEW_BRANCH=$(echo $ES_VERSION | sed -E "s/([0-9]+\.[0-9]+)\.[0-9]/\1/g")
2626

27-
# Temporary
28-
if [[ "$ES_VERSION" == "8.16.0" ]]; then
29-
NEW_BRANCH="8.x"
30-
fi
31-
3227
echo "Using branch $NEW_BRANCH instead of $BRANCH." 1>&2
3328
LATEST_BUILD=$(fetch_build $WORKFLOW $ARTIFACT $NEW_BRANCH)
3429
fi

branches.json

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,13 @@
88
"branch": "9.0"
99
},
1010
{
11-
"branch": "8.18"
11+
"branch": "8.19"
1212
},
1313
{
14-
"branch": "8.17"
14+
"branch": "8.18"
1515
},
1616
{
17-
"branch": "8.x"
17+
"branch": "8.17"
1818
},
1919
{
2020
"branch": "7.17"

build.gradle

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -207,14 +207,6 @@ tasks.register("verifyVersions") {
207207
throw new GradleException("No branch choice exists for development branch ${unreleasedVersion.branch} in .backportrc.json.")
208208
}
209209
}
210-
String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == '8.x' }.key
211-
String expectedMapping = "^v${versions.elasticsearch.replaceAll('-SNAPSHOT', '')}\$"
212-
if (versionMapping != expectedMapping) {
213-
throw new GradleException(
214-
"Backport label mapping for branch '8.x' is '${versionMapping}' but should be " +
215-
"'${expectedMapping}'. Update .backportrc.json."
216-
)
217-
}
218210
}
219211
}
220212

docs/build.gradle

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -83,9 +83,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
8383
setting 'xpack.license.self_generated.type', 'trial'
8484
setting 'indices.lifecycle.history_index_enabled', 'false'
8585
keystorePassword 'keystore-password'
86-
if (buildParams.snapshotBuild == false) {
87-
requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
88-
}
8986
}
9087

9188
// debug ccr test failures:
@@ -124,7 +121,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
124121

125122

126123
requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0")
127-
requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.12.0")
128124

129125
// TODO Rene: clean up this kind of cross project file references
130126
extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("src/main/resources/oidc/op-jwks.json")

docs/changelog/125517.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 125517
2+
summary: Semantic Text Chunking Indexing Pressure
3+
area: Machine Learning
4+
type: enhancement
5+
issues: []

docs/changelog/125922.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 125922
2+
summary: Fix text structure NPE when fields in list have null value
3+
area: Machine Learning
4+
type: bug
5+
issues: []

docs/changelog/126884.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 126884
2+
summary: Rare terms aggregation false positive fix
3+
area: Aggregations
4+
type: bug
5+
issues: []

docs/changelog/126973.yaml

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
pr: 126973
2+
summary: Add ability to redirect ingestion failures on data streams to a failure store
3+
area: Data streams
4+
type: feature
5+
issues: []
6+
highlight:
7+
title: Add ability to redirect ingestion failures on data streams to a failure store
8+
body: |-
9+
Documents that encountered ingest pipeline failures or mapping conflicts
10+
would previously be returned to the client as errors in the bulk and
11+
index operations. Many client applications are not equipped to respond
12+
to these failures. This leads to the failed documents often being
13+
dropped by the client which cannot hold the broken documents
14+
indefinitely. In many end user workloads, these failed documents
15+
represent events that could be critical signals for observability or
16+
security use cases.
17+
18+
To help mitigate this problem, data streams can now maintain a "failure
19+
store" which is used to accept and hold documents that fail to be
20+
ingested due to preventable configuration errors. The data stream's
21+
failure store operates like a separate set of backing indices with their
22+
own mappings and access patterns that allow Elasticsearch to accept
23+
documents that would otherwise be rejected due to unhandled ingest
24+
pipeline exceptions or mapping conflicts.
25+
26+
Users can enable redirection of ingest failures to the failure store on
27+
new data streams by specifying it in the new `data_stream_options` field
28+
inside of a component or index template:
29+
30+
[source,yaml]
31+
----
32+
PUT _index_template/my-template
33+
{
34+
"index_patterns": ["logs-test-*"],
35+
"data_stream": {},
36+
"template": {
37+
"data_stream_options": {
38+
"failure_store": {
39+
"enabled": true
40+
}
41+
}
42+
}
43+
}'
44+
----
45+
46+
Existing data streams can be configured with the new data stream
47+
`_options` endpoint:
48+
49+
[source,yaml]
50+
----
51+
PUT _data_stream/logs-test-apache/_options
52+
{
53+
"failure_store": {
54+
"enabled": "true"
55+
}
56+
}
57+
----
58+
59+
When redirection is enabled, any ingestion related failures will be
60+
captured in the failure store if the cluster is able to, along with the
61+
timestamp that the failure occurred, details about the error
62+
encountered, and the document that could not be ingested. Since failure
63+
stores are a kind of Elasticsearch index, we can search the data stream
64+
for the failures that it has collected. The failures are not shown by
65+
default as they are stored in different indices than the normal data
66+
stream data. In order to retrieve the failures, we use the `_search` API
67+
along with a new bit of index pattern syntax, the `::` selector.
68+
69+
[source,yaml]
70+
----
71+
POST logs-test-apache::failures/_search
72+
----
73+
74+
This index syntax informs the search operation to target the indices in
75+
its failure store instead of its backing indices. It can be mixed in a
76+
number of ways with other index patterns to include their failure store
77+
indices in the search operation:
78+
79+
[source,yaml]
80+
----
81+
POST logs-*::failures/_search
82+
POST logs-*,logs-*::failures/_search
83+
POST *::failures/_search
84+
POST _query
85+
{
86+
"query": "FROM my_data_stream*::failures"
87+
}
88+
----
89+
notable: true

docs/changelog/127225.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 127225
2+
summary: Fix count optimization with pushable union types
3+
area: ES|QL
4+
type: bug
5+
issues:
6+
- 127200

docs/changelog/127351.yaml

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
pr: 127351
2+
summary: Allow partial results by default in ES|QL
3+
area: ES|QL
4+
type: breaking
5+
issues: [122802]
6+
7+
breaking:
8+
title: Allow partial results by default in ES|QL
9+
area: ES|QL
10+
details: >-
11+
In earlier versions of {es}, ES|QL would fail the entire query if it encountered any error. ES|QL now returns partial results instead of failing when encountering errors.
12+
13+
impact: >-
14+
Callers should check the `is_partial` flag returned in the response to determine if the result is partial or complete. If returning partial results is not desired, this option can be overridden per request via an `allow_partial_results` parameter in the query URL or globally via the cluster setting `esql.query.allow_partial_results`.
15+
16+
notable: true

docs/changelog/127353.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 127353
2+
summary: Updating tika to 2.9.3
3+
area: Ingest Node
4+
type: upgrade
5+
issues: []

docs/changelog/127522.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 127522
2+
summary: Use INTERNAL_INGEST for Inference
3+
area: Machine Learning
4+
type: bug
5+
issues:
6+
- 127519

docs/changelog/127527.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 127527
2+
summary: "No, line noise isn't a valid ip"
3+
area: ES|QL
4+
type: bug
5+
issues: []

docs/plugins/development/creating-classic-plugins.asciidoc

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,8 +89,14 @@ The entitlements currently implemented and enforced in {es} that are available t
8989

9090
===== `manage_threads`
9191

92-
Allows code to call methods that create or modify properties on Java Threads, for example `Thread#start` or `ThreadGroup#setMaxPriority`. In general, setting the name, priority, daemon state and context class loader are things no plugins should do when executing on
93-
{es} threadpools; however, many 3rd party libraries that support async operations (e.g. Apache HTTP client) need to manage their own threads. In this case it is justifiable to request this entitlement.
92+
Allows code to call methods that create or modify properties on Java Threads, for example `Thread#start` or `ThreadGroup#setMaxPriority`.
93+
94+
[NOTE]
95+
====
96+
This entitlement is rarely necessary. Your plugin should use {es} thread pools and executors (see `Plugin#getExecutorBuilders`) instead of creating and managing its own threads. Plugins should avoid modifying thread name, priority, daemon state, and context class loader when executing on ES threadpools.
97+
98+
However, many 3rd party libraries that support async operations, such as the Apache HTTP client, need to create and manage their own threads. In such cases, it makes sense to request this entitlement.
99+
====
94100

95101
Example:
96102
```yaml

docs/reference/cluster/stats.asciidoc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1422,7 +1422,7 @@ as a human-readable string.
14221422
(integer) The maximum time taken to execute a {ccs} request, in milliseconds.
14231423
14241424
`avg`:::
1425-
(integer) The median time taken to execute a {ccs} request, in milliseconds.
1425+
(integer) The average time taken to execute a {ccs} request, in milliseconds.
14261426
14271427
`p90`:::
14281428
(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds.
@@ -1440,7 +1440,7 @@ as a human-readable string.
14401440
(integer) The maximum time taken to execute a {ccs} request, in milliseconds.
14411441
14421442
`avg`:::
1443-
(integer) The median time taken to execute a {ccs} request, in milliseconds.
1443+
(integer) The average time taken to execute a {ccs} request, in milliseconds.
14441444
14451445
`p90`:::
14461446
(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds.
@@ -1458,7 +1458,7 @@ as a human-readable string.
14581458
(integer) The maximum time taken to execute a {ccs} request, in milliseconds.
14591459
14601460
`avg`:::
1461-
(integer) The median time taken to execute a {ccs} request, in milliseconds.
1461+
(integer) The average time taken to execute a {ccs} request, in milliseconds.
14621462
14631463
`p90`:::
14641464
(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds.
@@ -1518,7 +1518,7 @@ This may include requests where partial results were returned, but not requests
15181518
(integer) The maximum time taken to execute a {ccs} request, in milliseconds.
15191519

15201520
`avg`:::
1521-
(integer) The median time taken to execute a {ccs} request, in milliseconds.
1521+
(integer) The average time taken to execute a {ccs} request, in milliseconds.
15221522

15231523
`p90`:::
15241524
(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds.

docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ PUT _index_template/dsl-data-stream-template
7272
----
7373
// TEST[continued]
7474

75-
We'll now index a document targetting `dsl-data-stream` to create the data stream
75+
We'll now index a document targeting `dsl-data-stream` to create the data stream
7676
and we'll also manually rollover the data stream to have another generation index created:
7777

7878
[source,console]
@@ -286,7 +286,7 @@ GET _data_stream/dsl-data-stream
286286
// TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/]
287287
// TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/]
288288
// TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/]
289-
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/]
289+
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":$body.data_streams.0.failure_store/]
290290

291291
<1> The existing backing index will continue to be managed by {ilm-init}
292292
<2> The existing backing index will continue to be managed by {ilm-init}
@@ -368,7 +368,7 @@ GET _data_stream/dsl-data-stream
368368
// TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/]
369369
// TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/]
370370
// TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/]
371-
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/]
371+
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":$body.data_streams.0.failure_store/]
372372

373373
<1> The backing indices that existed before rollover will continue to be managed by {ilm-init}
374374
<2> The backing indices that existed before rollover will continue to be managed by {ilm-init}
@@ -466,7 +466,7 @@ GET _data_stream/dsl-data-stream
466466
// TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/]
467467
// TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/]
468468
// TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/]
469-
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": true}/]
469+
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":$body.data_streams.0.failure_store/]
470470
<1> The write index is now managed by {ilm-init}
471471
<2> The `lifecycle` configured on the data stream is now disabled.
472472
<3> The next write index will be managed by {ilm-init}

docs/reference/esql/esql-across-clusters.asciidoc

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ Once the security model is configured, you can add remote clusters.
145145
include::{es-ref-dir}/search/search-your-data/search-across-clusters.asciidoc[tag=ccs-remote-cluster-setup]
146146

147147
<1> Since `skip_unavailable` was not set on `cluster_three`, it uses
148-
the default of `false`. See the <<ccq-skip-unavailable-clusters>>
148+
the default of `true`. See the <<ccq-skip-unavailable-clusters>>
149149
section for details.
150150

151151
[discrete]
@@ -283,7 +283,7 @@ searches are currently in that state. The clusters can have one of the following
283283
*successful* (searches on all shards were successful), *skipped* (the search
284284
failed on a cluster marked with `skip_unavailable`=`true`), *failed* (the search
285285
failed on a cluster marked with `skip_unavailable`=`false`) or **partial** (the search was
286-
<<esql-async-query-stop-api, interrupted>> before finishing).
286+
<<esql-async-query-stop-api, interrupted>> before finishing or has partially failed).
287287
<3> The `_clusters/details` section shows metadata about the search on each cluster.
288288
<4> If you included indices from the local cluster you sent the request to in your {ccs},
289289
it is identified as "(local)".
@@ -525,12 +525,9 @@ FROM my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001
525525
[[ccq-skip-unavailable-clusters]]
526526
==== Optional remote clusters
527527

528-
{ccs-cap} for {esql} currently does not respect the `skip_unavailable`
529-
setting. As a result, if a remote cluster specified in the request is
530-
unavailable or failed, {ccs} for {esql} queries will fail regardless of the setting.
531-
532-
We are actively working to align the behavior of {ccs} for {esql} with other
533-
{ccs} APIs.
528+
{ccs-cap} for {esql} will set the remote cluster which is disconnected from the querying cluster to `skipped`
529+
and continue the query with other clusters, unless the cluster's `skip_unavailable` setting is set to `false`,
530+
in which case the query will fail.
534531

535532
[discrete]
536533
[[ccq-during-upgrade]]

docs/reference/esql/esql-query-api.asciidoc

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -65,8 +65,9 @@ precedence.
6565

6666
`allow_partial_results`::
6767
(Optional, boolean) If `true`, partial results will be returned if there are shard failures, but
68-
the query can continue to execute on other clusters and shards. This defaults to the value of
69-
the cluster setting `esql.query.allow_partial_results`.
68+
the query can continue to execute on other clusters and shards. This defaults to `true`, unless
69+
the cluster setting `esql.query.allow_partial_results` is set to `false`, in which case it also
70+
defaults to `false`.
7071

7172
[discrete]
7273
[role="child_attributes"]

0 commit comments

Comments
 (0)