diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithIndexBlocksIT.java
new file mode 100644
index 0000000000000..a5c99418189f0
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchWithIndexBlocksIT.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest;
+import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction;
+import org.elasticsearch.action.search.ClosePointInTimeRequest;
+import org.elasticsearch.action.search.OpenPointInTimeRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportClosePointInTimeAction;
+import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.builder.PointInTimeBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+public class SearchWithIndexBlocksIT extends ESIntegTestCase {
+
+    public void testSearchIndexWithIndexRefreshBlock() {
+        createIndex("test");
+
+        var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.REFRESH, "test");
+        client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet();
+
+        indexRandom(
+            true,
+            prepareIndex("test").setId("1").setSource("field", "value"),
+            prepareIndex("test").setId("2").setSource("field", "value"),
+            prepareIndex("test").setId("3").setSource("field", "value"),
+            prepareIndex("test").setId("4").setSource("field", "value"),
+            prepareIndex("test").setId("5").setSource("field", "value"),
+            prepareIndex("test").setId("6").setSource("field", "value")
+        );
+
+        assertHitCount(prepareSearch().setQuery(QueryBuilders.matchAllQuery()), 0);
+    }
+
+    public void testSearchMultipleIndicesEachWithAnIndexRefreshBlock() {
+        createIndex("test");
+        createIndex("test2");
+
+        var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.REFRESH, "test", "test2");
+        client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet();
+
+        indexRandom(
+            true,
+            prepareIndex("test").setId("1").setSource("field", "value"),
+            prepareIndex("test").setId("2").setSource("field", "value"),
+            prepareIndex("test").setId("3").setSource("field", "value"),
+            prepareIndex("test").setId("4").setSource("field", "value"),
+            prepareIndex("test").setId("5").setSource("field", "value"),
+            prepareIndex("test").setId("6").setSource("field", "value"),
+            prepareIndex("test2").setId("1").setSource("field", "value"),
+            prepareIndex("test2").setId("2").setSource("field", "value"),
prepareIndex("test2").setId("3").setSource("field", "value"), + prepareIndex("test2").setId("4").setSource("field", "value"), + prepareIndex("test2").setId("5").setSource("field", "value"), + prepareIndex("test2").setId("6").setSource("field", "value") + ); + + assertHitCount(prepareSearch().setQuery(QueryBuilders.matchAllQuery()), 0); + } + + public void testSearchMultipleIndicesWithOneIndexRefreshBlock() { + createIndex("test"); + createIndex("test2"); + + // Only block test + var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.REFRESH, "test"); + client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet(); + + indexRandom( + true, + prepareIndex("test").setId("1").setSource("field", "value"), + prepareIndex("test").setId("2").setSource("field", "value"), + prepareIndex("test").setId("3").setSource("field", "value"), + prepareIndex("test").setId("4").setSource("field", "value"), + prepareIndex("test").setId("5").setSource("field", "value"), + prepareIndex("test").setId("6").setSource("field", "value"), + prepareIndex("test2").setId("1").setSource("field", "value"), + prepareIndex("test2").setId("2").setSource("field", "value"), + prepareIndex("test2").setId("3").setSource("field", "value"), + prepareIndex("test2").setId("4").setSource("field", "value"), + prepareIndex("test2").setId("5").setSource("field", "value"), + prepareIndex("test2").setId("6").setSource("field", "value") + ); + + // We should get test2 results (not blocked) + assertHitCount(prepareSearch().setQuery(QueryBuilders.matchAllQuery()), 6); + } + + public void testOpenPITWithIndexRefreshBlock() { + createIndex("test"); + + var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.REFRESH, "test"); + client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet(); + + indexRandom( + true, + prepareIndex("test").setId("1").setSource("field", "value"), + prepareIndex("test").setId("2").setSource("field", "value"), + prepareIndex("test").setId("3").setSource("field", "value"), + prepareIndex("test").setId("4").setSource("field", "value"), + prepareIndex("test").setId("5").setSource("field", "value"), + prepareIndex("test").setId("6").setSource("field", "value") + ); + + BytesReference pitId = null; + try { + OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueSeconds(10)) + .allowPartialSearchResults(true); + pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); + SearchRequest searchRequest = new SearchRequest().source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitId).setKeepAlive(TimeValue.timeValueSeconds(10))) + ); + assertHitCount(client().search(searchRequest), 0); + } finally { + if (pitId != null) { + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index e42f8127c5e97..8adb9180e3bae 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -186,6 +186,12 @@ private void runCoordinatorRewritePhase() { assert assertSearchCoordinationThread(); final List matchedShardLevelRequests = new ArrayList<>(); for 
+            if (searchShardIterator.prefiltered() == false && searchShardIterator.skip()) {
+                // This implies the iterator was skipped due to an index level block,
+                // not a remote can-match run.
+                continue;
+            }
+
             final CanMatchNodeRequest canMatchNodeRequest = new CanMatchNodeRequest(
                 request,
                 searchShardIterator.getOriginalIndices().indicesOptions(),
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java
index 00ff8f33f5659..53d5e1f5b717f 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java
@@ -41,7 +41,7 @@ public final class SearchShardIterator implements Comparable<SearchShardIterator>
 
     /**
      * Creates a {@link SearchShardIterator} instance that iterates over a subset of the given shards
-     * for the given <code>shardId</code>.
+     * for a given shardId.
      *
      * @param clusterAlias the alias of the cluster where the shard is located
      * @param shardId shard id of the group
@@ -54,6 +54,28 @@ public SearchShardIterator(@Nullable String clusterAlias, ShardId shardId, List<ShardRouting> shards, OriginalIndices originalIndices) {
 
     /**
      * Creates a {@link SearchShardIterator} instance that iterates over a subset of the given shards
+     * for a given shardId.
+     *
+     * @param clusterAlias the alias of the cluster where the shard is located
+     * @param shardId shard id of the group
+     * @param shards shards to iterate
+     * @param originalIndices the indices that the search request originally related to (before any rewriting happened)
+     * @param skip if true, then this group won't have matches (due to an index level block),
+     *             and it can be safely skipped from the search
+     */
+    public SearchShardIterator(
+        @Nullable String clusterAlias,
+        ShardId shardId,
+        List<ShardRouting> shards,
+        OriginalIndices originalIndices,
+        boolean skip
+    ) {
+        this(clusterAlias, shardId, shards.stream().map(ShardRouting::currentNodeId).toList(), originalIndices, null, null, false, skip);
+    }
+
+    /**
+     * Creates a {@link SearchShardIterator} instance that iterates over a subset of the given shards
+     * for a given shardId.
      *
      * @param clusterAlias the alias of the cluster where the shard is located
      * @param shardId shard id of the group
@@ -62,7 +84,8 @@ public SearchShardIterator(@Nullable String clusterAlias, ShardId shardId, List<ShardRouting> shards, OriginalIndices originalIndices) {
      * @param searchContextId the point-in-time specified for this group if exists
      * @param searchContextKeepAlive the time interval that data nodes should extend the keep alive of the point-in-time
      * @param prefiltered if true, then this group already executed the can_match phase
-     * @param skip if true, then this group won't have matches, and it can be safely skipped from the search
+     * @param skip if true, then this group won't have matches (due to can_match, or an index level block),
+     *             and it can be safely skipped from the search
      */
     public SearchShardIterator(
         @Nullable String clusterAlias,
@@ -83,7 +106,6 @@ public SearchShardIterator(
         assert searchContextKeepAlive == null || searchContextId != null;
         this.prefiltered = prefiltered;
         this.skip = skip;
-        assert skip == false || prefiltered : "only prefiltered shards are skip-able";
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 8d704853a5f8e..2a89faf8da31f 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -148,6 +148,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
@@ ... @@ static Map<String, OriginalIndices> buildPerIndexOriginalIndices(
         for (String index : indices) {
             if (hasBlocks) {
                 blocks.indexBlockedRaiseException(projectState.projectId(), ClusterBlockLevel.READ, index);
+                if (blocks.hasIndexBlock(projectState.projectId(), index, IndexMetadata.INDEX_REFRESH_BLOCK)) {
+                    res.put(index, SKIPPED_INDICES);
+                    continue;
+                }
             }
 
             String[] aliases = indexNameExpressionResolver.allIndexAliases(projectState.metadata(), index, indicesAndAliases);
@@ -588,7 +594,7 @@ public void onFailure(Exception e) {}
         );
     }
 
-    static void adjustSearchType(SearchRequest searchRequest, boolean singleShard) {
+    static void adjustSearchType(SearchRequest searchRequest, boolean oneOrZeroValidShards) {
         // if there's a kNN search, always use DFS_QUERY_THEN_FETCH
         if (searchRequest.hasKnnSearch()) {
             searchRequest.searchType(DFS_QUERY_THEN_FETCH);
@@ -603,7 +609,7 @@ static void adjustSearchType(SearchRequest searchRequest, boolean singleShard) {
         }
 
         // optimize search type for cases where there is only one shard group to search on
-        if (singleShard) {
+        if (oneOrZeroValidShards) {
             // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard
             searchRequest.searchType(QUERY_THEN_FETCH);
         }
@@ -1304,7 +1310,8 @@ private void executeSearch(
 
         Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, projectState.cluster());
 
-        adjustSearchType(searchRequest, shardIterators.size() == 1);
+        boolean oneOrZeroValidShards = shardIterators.size() == 1 || allOrAllButOneSkipped(shardIterators);
+        adjustSearchType(searchRequest, oneOrZeroValidShards);
 
         final DiscoveryNodes nodes = projectState.cluster().nodes();
         BiFunction<String, String, Transport.Connection> connectionLookup = buildConnectionLookup(
@@ -1337,6 +1344,30 @@ private void executeSearch(
         );
     }
 
+    /**
+     * Determines if all, or all but one, iterators are skipped.
+     * (At this point, iterators may be marked as skipped due to index level blocks).
+     * We expect skipped iterators to be unlikely, so returning fast after we see more
+     * than one "not skipped" is an intended optimization.
+     *
+     * @param searchShardIterators all the shard iterators derived from indices being searched
+     * @return true if all of them are already skipped, or only one is not skipped
+     */
+    private boolean allOrAllButOneSkipped(List<SearchShardIterator> searchShardIterators) {
+        int notSkippedCount = 0;
+
+        for (SearchShardIterator searchShardIterator : searchShardIterators) {
+            if (searchShardIterator.skip() == false) {
+                notSkippedCount++;
+                if (notSkippedCount > 1) {
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
     Executor asyncSearchExecutor(final String[] indices) {
         boolean seenSystem = false;
         boolean seenCritical = false;
@@ -1889,7 +1920,13 @@ List<SearchShardIterator> getLocalShardsIterator(
             final ShardId shardId = shardRouting.shardId();
             OriginalIndices finalIndices = originalIndices.get(shardId.getIndex().getName());
             assert finalIndices != null;
-            list[i++] = new SearchShardIterator(clusterAlias, shardId, shardRouting.getShardRoutings(), finalIndices);
+            list[i++] = new SearchShardIterator(
+                clusterAlias,
+                shardId,
+                shardRouting.getShardRoutings(),
+                finalIndices,
+                finalIndices == SKIPPED_INDICES
+            );
         }
         // the returned list must support in-place sorting, so this is the most memory efficient we can do here
         return Arrays.asList(list);
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
index 59ee04414b3fc..67cd3fe8b47fd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
@@ -283,7 +283,8 @@ public enum APIBlock implements Writeable {
         READ("read", INDEX_READ_BLOCK, Property.ServerlessPublic),
         WRITE("write", INDEX_WRITE_BLOCK, Property.ServerlessPublic),
         METADATA("metadata", INDEX_METADATA_BLOCK, Property.ServerlessPublic),
-        READ_ONLY_ALLOW_DELETE("read_only_allow_delete", INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
+        READ_ONLY_ALLOW_DELETE("read_only_allow_delete", INDEX_READ_ONLY_ALLOW_DELETE_BLOCK),
+        REFRESH("refresh", INDEX_REFRESH_BLOCK);
 
         final String name;
         final String settingName;
diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
index 3c5dc6b39292c..1fb28725ba4f0 100644
--- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
@@ -41,6 +41,8 @@
 import org.elasticsearch.cluster.node.VersionInformation;
 import org.elasticsearch.cluster.project.TestProjectResolvers;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.TestShardRouting;
@@ -133,6 +135,8 @@
 import static org.hamcrest.Matchers.hasSize;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.nullable;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -1812,4 +1816,104 @@ public void onFailure(Exception ex) {
             assertTrue(ESTestCase.terminate(threadPool));
         }
     }
+
+    public void testSkippedIteratorsForIndicesWithRefreshBlock() {
+        final ProjectId projectId = randomProjectIdOrDefault();
+
+        String normalIndexName = "test-normal";
+        String blockedIndexName = "test-blocked";
+        final String[] indexNames = { normalIndexName, blockedIndexName };
+        final Index normalIndex = new Index(normalIndexName, UUIDs.randomBase64UUID());
+        final Index blockedIndex = new Index(blockedIndexName, UUIDs.randomBase64UUID());
+        final int numberOfShards = randomIntBetween(1, 3);
+        final int numberOfReplicas = randomIntBetween(0, 1);
+        final int totalShards = numberOfShards + numberOfShards * numberOfReplicas;
+
+        ClusterState clusterState = ClusterStateCreationUtils.stateWithAssignedPrimariesAndReplicas(
+            projectId,
+            indexNames,
+            numberOfShards,
+            numberOfReplicas
+        );
+        ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(clusterState.blocks());
+        blocksBuilder.addIndexBlock(projectId, "test-blocked", IndexMetadata.INDEX_REFRESH_BLOCK);
+        clusterState = ClusterState.builder(clusterState).blocks(blocksBuilder).build();
+        List<ShardIterator> shardIts = new ArrayList<>();
+        for (int i = 0; i < totalShards; i++) {
+            shardIts.add(new ShardIterator(new ShardId(normalIndex, randomInt()), Collections.emptyList()));
+            shardIts.add(new ShardIterator(new ShardId(blockedIndex, randomInt()), Collections.emptyList()));
+        }
+        final OperationRouting operationRouting = mock(OperationRouting.class);
+        when(
+            operationRouting.searchShards(
+                eq(clusterState.projectState(projectId)),
+                eq(indexNames),
+                any(),
+                nullable(String.class),
+                any(),
+                any()
+            )
+        ).thenReturn(shardIts);
+        ClusterService clusterService = mock(ClusterService.class);
+        when(clusterService.state()).thenReturn(clusterState);
+        when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
+        when(clusterService.operationRouting()).thenReturn(operationRouting);
+
+        Settings settings = Settings.builder().put("node.name", TransportSearchAction.class.getSimpleName()).build();
+        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true);
+        ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP, new DefaultBuiltInExecutorBuilders());
+        try {
+            TransportService transportService = MockTransportService.createNewService(
+                Settings.EMPTY,
+                VersionInformation.CURRENT,
+                transportVersion,
+                threadPool
+            );
+            NodeClient client = new NodeClient(settings, threadPool);
+            SearchService searchService = mock(SearchService.class);
+            when(searchService.getRewriteContext(any(), any(), any(), anyBoolean())).thenReturn(
+                new QueryRewriteContext(null, null, null, null, null, null)
+            );
+
+            TransportSearchAction transportSearchAction = new TransportSearchAction(
+                threadPool,
+                new NoneCircuitBreakerService(),
+                transportService,
+                searchService,
+                null,
+                new SearchTransportService(transportService, client, null),
+                null,
+                clusterService,
+                new ActionFilters(Collections.emptySet()),
+                TestProjectResolvers.usingRequestHeader(threadPool.getThreadContext()),
+                TestIndexNameExpressionResolver.newInstance(threadPool.getThreadContext()),
+                null,
+                null,
+                new SearchResponseMetrics(TelemetryProvider.NOOP.getMeterRegistry()),
+                client,
+                new UsageService()
+            );
+
+            SearchRequest searchRequest = new SearchRequest(indexNames);
+            searchRequest.allowPartialSearchResults(true);
+            List<SearchShardIterator> searchShardIts = transportSearchAction.getLocalShardsIterator(
+                clusterState.projectState(projectId),
+                searchRequest,
+                searchRequest.getLocalClusterAlias(),
+                new HashSet<>(),
+                indexNames
+            );
+
+            assertThat(searchShardIts.size(), equalTo(shardIts.size()));
+            for (SearchShardIterator searchShardIt : searchShardIts) {
+                if (searchShardIt.skip()) {
+                    assertThat(searchShardIt.shardId().getIndexName(), equalTo("test-blocked"));
+                } else {
+                    assertThat(searchShardIt.shardId().getIndexName(), equalTo("test-normal"));
+                }
+            }
+        } finally {
+            assertTrue(ESTestCase.terminate(threadPool));
+        }
+    }
 }
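
Note (illustrative only, not part of the patch): the snippet below is a self-contained sketch of the coordinator-side reasoning the patch introduces. The types are hypothetical stand-ins rather than Elasticsearch classes; it only shows why allOrAllButOneSkipped() lets adjustSearchType() fall back to QUERY_THEN_FETCH once at most one non-skipped shard group remains.

// Illustrative sketch with stand-in types; not Elasticsearch code.
import java.util.List;

final class RefreshBlockSkipExample {

    // Stand-in for SearchShardIterator: only the skip flag matters here.
    record ShardIt(boolean skip) {}

    // Mirrors TransportSearchAction#allOrAllButOneSkipped: return fast once a second
    // non-skipped iterator is seen, since skipped iterators are expected to be rare.
    static boolean allOrAllButOneSkipped(List<ShardIt> iterators) {
        int notSkipped = 0;
        for (ShardIt it : iterators) {
            if (it.skip() == false && ++notSkipped > 1) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Two shard groups belong to a refresh-blocked index (skip == true), one is searchable:
        // at most one group remains, so QUERY_THEN_FETCH suffices and a DFS round-trip adds nothing.
        List<ShardIt> iterators = List.of(new ShardIt(true), new ShardIt(true), new ShardIt(false));
        System.out.println(allOrAllButOneSkipped(iterators)); // prints: true
    }
}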