Skip search shards with INDEX_REFRESH_BLOCK #129132

Open
wants to merge 28 commits into base: main

Commits (28; diff shows changes from 26):
2aa74e3 Skip indices that have an index refresh block (benchaplin, Jun 4, 2025)
12b6b81 Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jun 6, 2025)
9c705cd Construct the iterator skipped (benchaplin, Jun 9, 2025)
1c75721 Fix javadocs (benchaplin, Jun 9, 2025)
1ecc447 Add unit test (benchaplin, Jun 9, 2025)
cdb4bc1 [CI] Auto commit changes from spotless (elasticsearchmachine, Jun 9, 2025)
b7ade2d Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jun 9, 2025)
cd991c2 Merge branch 'main' into skip_search_shards_with_index_block (drempapis, Jun 11, 2025)
5f50d5c Merge branch 'main' into skip_search_shards_with_index_block (drempapis, Jun 11, 2025)
3f86fb8 Rewrite DFS if processing one or zero unskipped shard iterators (benchaplin, Jun 11, 2025)
0edc27c Make can-match support already skipped shard iterators (benchaplin, Jun 11, 2025)
9de6f06 Add IT for executing search and PIT against refresh blocked indices (benchaplin, Jun 11, 2025)
be37bf6 Fix resource leak by using decRef assertion (benchaplin, Jun 11, 2025)
17706e2 [CI] Auto commit changes from spotless (elasticsearchmachine, Jun 11, 2025)
8759a07 Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jun 11, 2025)
bf8a2be Improve names of valid shard check method and variables (benchaplin, Jul 1, 2025)
8887609 Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jul 1, 2025)
0f0200a Remove constructor used only in tests (benchaplin, Jul 1, 2025)
7689263 Fix missed merge conflict (benchaplin, Jul 1, 2025)
598e906 Remove ability to set INDEX_REFRESH_BLOCK from API, add directly to c… (benchaplin, Jul 3, 2025)
4cdfbd0 Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jul 3, 2025)
76ecade Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jul 3, 2025)
86c9a5d Rework change to ignore blocked indices before shard resolution (benchaplin, Jul 15, 2025)
7200edc Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jul 15, 2025)
d72600b Clean up (benchaplin, Jul 15, 2025)
61d40c4 Add _msearch test case (benchaplin, Jul 15, 2025)
51d5196 Revert search type rewrite change (benchaplin, Jul 18, 2025)
24f2770 Merge branch 'main' into skip_search_shards_with_index_block (benchaplin, Jul 18, 2025)
@@ -0,0 +1,153 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.search;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.ClosePointInTimeRequest;
import org.elasticsearch.action.search.OpenPointInTimeRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.TransportClosePointInTimeAction;
import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.ProjectId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESIntegTestCase;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.cluster.block.ClusterBlocks.EMPTY_CLUSTER_BLOCK;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;

public class SearchWithIndexBlocksIT extends ESIntegTestCase {
Contributor: Do we also want to have an ESQL test for this case?

Contributor (author): I'm tracking that as a follow-up task.


public void testSearchIndicesWithIndexRefreshBlocks() {
List<String> indices = createIndices();
Map<String, Integer> numDocsPerIndex = indexDocuments(indices);
List<String> unblockedIndices = addIndexRefreshBlockToSomeIndices(indices);

int expectedHits = 0;
for (String index : unblockedIndices) {
expectedHits += numDocsPerIndex.get(index);
}

assertHitCount(prepareSearch().setQuery(QueryBuilders.matchAllQuery()), expectedHits);
}

public void testOpenPITOnIndicesWithIndexRefreshBlocks() {
List<String> indices = createIndices();
Map<String, Integer> numDocsPerIndex = indexDocuments(indices);
List<String> unblockedIndices = addIndexRefreshBlockToSomeIndices(indices);

int expectedHits = 0;
for (String index : unblockedIndices) {
expectedHits += numDocsPerIndex.get(index);
}

BytesReference pitId = null;
try {
OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest(indices.toArray(new String[0])).keepAlive(
TimeValue.timeValueSeconds(10)
).allowPartialSearchResults(true);
pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId();
SearchRequest searchRequest = new SearchRequest().source(
new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitId).setKeepAlive(TimeValue.timeValueSeconds(10)))
);
assertHitCount(client().search(searchRequest), expectedHits);
} finally {
if (pitId != null) {
client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
}
}
}

public void testMultiSearchIndicesWithIndexRefreshBlocks() {
List<String> indices = createIndices();
Map<String, Integer> numDocsPerIndex = indexDocuments(indices);
List<String> unblockedIndices = addIndexRefreshBlockToSomeIndices(indices);

int expectedHits = 0;
for (String index : unblockedIndices) {
expectedHits += numDocsPerIndex.get(index);
}

final long expectedHitsL = expectedHits;
assertResponse(
client().prepareMultiSearch()
.add(prepareSearch().setQuery(QueryBuilders.matchAllQuery()))
.add(prepareSearch().setQuery(QueryBuilders.termQuery("field", "blah"))),
response -> {
assertHitCount(Objects.requireNonNull(response.getResponses()[0].getResponse()), expectedHitsL);
assertHitCount(Objects.requireNonNull(response.getResponses()[1].getResponse()), 0);
}
);
}

private List<String> createIndices() {
int numIndices = randomIntBetween(1, 3);
List<String> indices = new ArrayList<>();
for (int i = 0; i < numIndices; i++) {
indices.add("test" + i);
createIndex("test" + i);
}
return indices;
}

private Map<String, Integer> indexDocuments(List<String> indices) {
Map<String, Integer> numDocsPerIndex = new HashMap<>();
List<IndexRequestBuilder> indexRequests = new ArrayList<>();
for (String index : indices) {
int numDocs = randomIntBetween(0, 10);
numDocsPerIndex.put(index, numDocs);
for (int i = 0; i < numDocs; i++) {
indexRequests.add(prepareIndex(index).setId(String.valueOf(i)).setSource("field", "value"));
}
}
indexRandom(true, indexRequests);

return numDocsPerIndex;
}

private List<String> addIndexRefreshBlockToSomeIndices(List<String> indices) {
List<String> unblockedIndices = new ArrayList<>();
var blocksBuilder = ClusterBlocks.builder().blocks(EMPTY_CLUSTER_BLOCK);
for (String index : indices) {
boolean blockIndex = randomBoolean();
if (blockIndex) {
blocksBuilder.addIndexBlock(ProjectId.DEFAULT, index, IndexMetadata.INDEX_REFRESH_BLOCK);
} else {
unblockedIndices.add(index);
}
}

var dataNodes = clusterService().state().getNodes().getAllNodes();
for (DiscoveryNode dataNode : dataNodes) {
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, dataNode.getName());
ClusterState currentState = clusterService.state();
ClusterState newState = ClusterState.builder(currentState).blocks(blocksBuilder).build();
setState(clusterService, newState);
Member: This method is not intended to be used in integration tests, as it overrides the current data node cluster state. For testing the INDEX_REFRESH_BLOCK I think it makes sense to only have unit tests in stateful Elasticsearch.

Contributor (author): Sorry @tlrx, can you explain more about the risks of doing this? I like having fine-grained control over blocks so I can write tests that block some indices and allow others in one search - I think this is critical to test. If I can only 'set' the block by controlling active search nodes (like I do in the other PR), I can't think of a way to achieve what I want.

}

return unblockedIndices;
}
}
@@ -589,7 +589,7 @@ public void onFailure(Exception e) {}
);
}

- static void adjustSearchType(SearchRequest searchRequest, boolean singleShard) {
+ static void adjustSearchType(SearchRequest searchRequest, boolean oneOrZeroShards) {
// if there's a kNN search, always use DFS_QUERY_THEN_FETCH
if (searchRequest.hasKnnSearch()) {
searchRequest.searchType(DFS_QUERY_THEN_FETCH);
@@ -604,7 +604,7 @@ static void adjustSearchType(SearchRequest searchRequest, boolean singleShard) {
}

// optimize search type for cases where there is only one shard group to search on
- if (singleShard) {
+ if (oneOrZeroShards) {
// if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_THEN_FETCH);
}
@@ -1305,7 +1305,7 @@ private void executeSearch(

Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, projectState.cluster());

- adjustSearchType(searchRequest, shardIterators.size() == 1);
+ adjustSearchType(searchRequest, shardIterators.size() <= 1);
Member: Is this change required? Is it necessary to adjust the search type when there are no shards to query?

Contributor (author): Ah, good question. I just tested it and it looks like no. I had made the change to avoid a DfsQueryPhase error I was getting with the previous approach. I think it's a meaningless change now; if there are no shards, the search will end in AbstractSearchAsyncAction#start. I've removed it.


final DiscoveryNodes nodes = projectState.cluster().nodes();
BiFunction<String, String, Transport.Connection> connectionLookup = buildConnectionLookup(
@@ -1864,6 +1864,7 @@ List<SearchShardIterator> getLocalShardsIterator(
Set<ResolvedExpression> indicesAndAliases,
String[] concreteIndices
) {
concreteIndices = ignoreBlockedIndices(projectState, concreteIndices);
var routingMap = indexNameExpressionResolver.resolveSearchRouting(
projectState.metadata(),
searchRequest.routing(),
@@ -1896,6 +1897,20 @@ List<SearchShardIterator> getLocalShardsIterator(
return Arrays.asList(list);
}

static String[] ignoreBlockedIndices(ProjectState projectState, String[] concreteIndices) {
// optimization: mostly we do not have any blocks so there's no point in the expensive per-index checking
boolean hasIndexBlocks = projectState.blocks().indices(projectState.projectId()).isEmpty() == false;
if (hasIndexBlocks) {
return Arrays.stream(concreteIndices)
.filter(
index -> projectState.blocks()
.hasIndexBlock(projectState.projectId(), index, IndexMetadata.INDEX_REFRESH_BLOCK) == false
)
.toArray(String[]::new);
}
return concreteIndices;
}

private interface TelemetryListener {
void setRemotes(int count);

@@ -1812,4 +1812,35 @@ public void onFailure(Exception ex) {
assertTrue(ESTestCase.terminate(threadPool));
}
}

public void testIgnoreBlockedIndices() {
int numIndices = randomIntBetween(1, 10);
String[] concreteIndices = new String[numIndices];
for (int i = 0; i < numIndices; i++) {
concreteIndices[i] = "index" + i;
}

List<String> shuffledIndices = Arrays.asList(concreteIndices);
Collections.shuffle(shuffledIndices, random());
concreteIndices = shuffledIndices.toArray(new String[0]);

final ProjectId projectId = randomProjectIdOrDefault();
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder();
int numBlockedIndices = randomIntBetween(0, numIndices);
for (int i = 0; i < numBlockedIndices; i++) {
blocksBuilder.addIndexBlock(projectId, concreteIndices[i], IndexMetadata.INDEX_REFRESH_BLOCK);
}
final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.putProjectMetadata(ProjectMetadata.builder(projectId).build())
.blocks(blocksBuilder)
.build();
final ProjectState projectState = clusterState.projectState(projectId);

String[] actual = TransportSearchAction.ignoreBlockedIndices(projectState, concreteIndices);
String[] expected = Arrays.stream(concreteIndices)
.filter(index -> clusterState.blocks().hasIndexBlock(projectId, index, IndexMetadata.INDEX_REFRESH_BLOCK) == false)
.toArray(String[]::new);

assertThat(Arrays.asList(actual), containsInAnyOrder(expected));
}
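
For readers following the new unit test, here is a minimal, self-contained sketch of the filtering contract being asserted. It is plain Java with hypothetical names: a Set<String> stands in for the project's index-block table, whereas the real method checks projectState.blocks().hasIndexBlock(projectId, index, IndexMetadata.INDEX_REFRESH_BLOCK).

import java.util.Arrays;
import java.util.Set;

final class IgnoreBlockedIndicesSketch {

    // Mirrors the shape of TransportSearchAction.ignoreBlockedIndices: drop every
    // concrete index that carries a refresh block and keep the rest.
    static String[] ignoreBlockedIndices(Set<String> refreshBlockedIndices, String[] concreteIndices) {
        // Same optimization as in the PR: if there are no blocks at all, skip the per-index check.
        if (refreshBlockedIndices.isEmpty()) {
            return concreteIndices;
        }
        return Arrays.stream(concreteIndices)
            .filter(index -> refreshBlockedIndices.contains(index) == false)
            .toArray(String[]::new);
    }

    public static void main(String[] args) {
        String[] indices = { "test0", "test1", "test2" };
        // Only unblocked indices survive, which is why the integration tests above
        // sum expected hits over unblockedIndices only.
        System.out.println(Arrays.toString(ignoreBlockedIndices(Set.of("test1"), indices))); // [test0, test2]
    }
}

The sketch only restates the filtering behaviour; it is not the Elasticsearch implementation, and the helper names are made up for illustration.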
}