/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 */

package org.opensearch.search.query;

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.action.search.SearchResponse;
import org.opensearch.common.settings.Settings;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

import java.util.Arrays;
import java.util.Collection;

import static org.opensearch.index.query.QueryBuilders.spanNearQuery;
import static org.opensearch.index.query.QueryBuilders.spanTermQuery;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE;
import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;

/**
 * Integration tests for {@code span_near} queries with concurrent segment search and the
 * intra-segment partition strategies ({@code segment}, {@code balanced}, {@code force}).
 */
public class SpanNearQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    public SpanNearQueryIT(Settings staticSettings) {
        super(staticSettings);
    }

    /**
     * Runs the suite once per partition-strategy configuration. The generic type
     * parameter is restored here: a randomizedtesting {@code @ParametersFactory}
     * method must return {@code Collection<Object[]>}.
     */
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), "segment").build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() },
            new Object[] {
                Settings.builder()
                    .put(CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), "force")
                    .put(CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.getKey(), 1000)
                    .build() },
            new Object[] {
                Settings.builder()
                    .put(CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), "balanced")
                    .put(CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.getKey(), 1000)
                    .build() }
        );
    }

    public void testSpanNearQuery() throws Exception {
        createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
        int totalDocs = 2500;
        // Doc distribution: every 100th doc (25 docs) is "alpha beta" (adjacent),
        // every other 50th doc (25 docs) is "alpha gamma beta" (one gap), rest are filler.
        for (int i = 0; i < totalDocs; i++) {
            String content = (i % 100 == 0) ? "alpha beta" : (i % 50 == 0) ? "alpha gamma beta" : "other words " + i;
            client().prepareIndex("test").setId(String.valueOf(i)).setSource("field", content).get();
        }
        refresh();
        forceMerge(1);
        indexRandomForConcurrentSearch("test");
        // Slop 0: only the 25 adjacent "alpha beta" docs match.
        SearchResponse response = client().prepareSearch("test")
            .setQuery(spanNearQuery(spanTermQuery("field", "alpha"), 0).addClause(spanTermQuery("field", "beta")).inOrder(true))
            .get();
        assertHitCount(response, 25L);
        // Slop 1: the 25 "alpha gamma beta" docs match as well.
        response = client().prepareSearch("test")
            .setQuery(spanNearQuery(spanTermQuery("field", "alpha"), 1).addClause(spanTermQuery("field", "beta")).inOrder(true))
            .get();
        assertHitCount(response, 50L);
    }
}
a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -816,6 +816,36 @@ public static IndexMergePolicy fromString(String text) { Property.IndexScope ); + // Partition strategy constants + public static final String CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT = "segment"; + public static final String CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_BALANCED = "balanced"; + public static final String CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_FORCE = "force"; + + public static final Setting INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY = Setting.simpleString( + "index.search.concurrent_segment_search.partition_strategy", + CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT, + value -> { + switch (value) { + case CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT: + case CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_BALANCED: + case CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_FORCE: + break; + default: + throw new IllegalArgumentException("Setting value must be one of [segment, balanced, force]"); + } + }, + Property.Dynamic, + Property.IndexScope + ); + + public static final Setting INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE = Setting.intSetting( + "index.search.concurrent_segment_search.partition_min_segment_size", + 500_000, + 1000, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting( "index.optimize_doc_id_lookup.fuzzy_set.enabled", false, diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index f52b393202d28..40211342f823e 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -118,4 +118,13 @@ default void visit(QueryBuilderVisitor visitor) { visitor.accept(this); }; + /** + * 
Indicates whether this query benefits from intra-segment search. + * Override to return {@code true} for compute-heavy queries that parallelize well + * Default is {@code false} - queries must explicitly opt-in. + */ + default boolean supportsIntraSegmentSearch() { + return false; + } + } diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index 2912a5cb09276..eef1f92976b3d 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -311,6 +311,11 @@ public void visit(QueryBuilderVisitor visitor) { } } + @Override + public boolean supportsIntraSegmentSearch() { + return true; + } + /** * SpanGapQueryBuilder enables gaps in a SpanNearQuery. * Since, SpanGapQuery is private to SpanNearQuery, SpanGapQueryBuilder cannot diff --git a/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java index a6108578da06c..90f31cd68d9a6 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java @@ -155,4 +155,9 @@ public static SpanTermQueryBuilder fromXContent(XContentParser parser) throws IO public String getWriteableName() { return NAME; } + + @Override + public boolean supportsIntraSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 5e2d93a9af31e..cf55ef0518463 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -83,6 +83,8 @@ import org.opensearch.search.deciders.ConcurrentSearchDecision; import 
org.opensearch.search.deciders.ConcurrentSearchRequestDecider; import org.opensearch.search.deciders.ConcurrentSearchVisitor; +import org.opensearch.search.deciders.IntraSegmentSearchDecider; +import org.opensearch.search.deciders.IntraSegmentSearchVisitor; import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.FetchSearchResult; @@ -106,6 +108,7 @@ import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.slice.SliceBuilder; import org.opensearch.search.sort.SortAndFormats; +import org.opensearch.search.startree.StarTreeQueryHelper; import org.opensearch.search.streaming.FlushMode; import org.opensearch.search.suggest.SuggestionSearchContext; @@ -132,6 +135,9 @@ import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT; import static org.opensearch.search.SearchService.KEYWORD_INDEX_OR_DOC_VALUES_ENABLED; import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS; import static org.opensearch.search.streaming.FlushModeResolver.STREAMING_MAX_ESTIMATED_BUCKET_COUNT; @@ -220,6 +226,7 @@ final class DefaultSearchContext extends SearchContext { private final Function requestToAggReduceContextBuilder; private final String concurrentSearchMode; private final SetOnce requestShouldUseConcurrentSearch = new SetOnce<>(); + private final SetOnce requestShouldUseIntraSegmentSearch = new SetOnce<>(); private final int maxAggRewriteFilters; private final int 
filterRewriteSegmentThreshold; private final int cardinalityAggregationPruningThreshold; @@ -1036,13 +1043,23 @@ private boolean evaluateAutoMode() { logger.debug("request has supported aggregations, using concurrent search"); } return true; - - } else { - if (logger.isDebugEnabled()) { - logger.debug("request does not have aggregations, not using concurrent search"); + } else if (CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT.equals(getPartitionStrategy()) == false + && request().source() != null + && request().source().query() != null + && request().source().query().supportsIntraSegmentSearch()) { + if (logger.isDebugEnabled()) { + logger.debug( + "request query supports intra-segment search, using concurrent search with partition strategy: {}", + getPartitionStrategy() + ); + } + return true; + } else { + if (logger.isDebugEnabled()) { + logger.debug("request does not have aggregations, not using concurrent search"); + } + return false; } - return false; - } } else { if (logger.isDebugEnabled()) { @@ -1332,4 +1349,71 @@ public double getStreamingMinCardinalityRatio() { public long getStreamingMinEstimatedBucketCount() { return clusterService.getClusterSettings().get(STREAMING_MIN_ESTIMATED_BUCKET_COUNT); } + + /** + * Returns the partition strategy for this search context. + */ + @Override + public String getPartitionStrategy() { + return indexService.getIndexSettings() + .getSettings() + .get( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), + clusterService.getClusterSettings().get(CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY) + ); + } + + /** + * Returns the minimum segment size required for balanced partitioning. 
+ */ + @Override + public int getPartitionMinSegmentSize() { + return indexService.getIndexSettings() + .getSettings() + .getAsInt( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.getKey(), + clusterService.getClusterSettings().get(CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE) + ); + } + + /** + * Returns intra-segment search status for the search context. + */ + @Override + public boolean shouldUseIntraSegmentSearch() { + return Boolean.TRUE.equals(requestShouldUseIntraSegmentSearch.get()); + } + + /** + * Evaluate if request should use intra-segment search based on partition strategy and query/aggregation analysis. + */ + public void evaluateRequestShouldUseIntraSegmentSearch() { + String partitionStrategy = getPartitionStrategy(); + if (CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT.equals(partitionStrategy) || shouldUseConcurrentSearch() == false) { + requestShouldUseIntraSegmentSearch.set(false); + return; + } + // StarTree precomputes aggregations at index time - intra-segment adds no benefit + if (aggregations() != null && StarTreeQueryHelper.getSupportedStarTree(getQueryShardContext()) != null) { + logger.debug("partition strategy decision: StarTree detected, disabling intra-segment"); + requestShouldUseIntraSegmentSearch.set(false); + return; + } + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + if (request().source() != null && request().source().query() != null) { + IntraSegmentSearchVisitor visitor = new IntraSegmentSearchVisitor(decider); + request().source().query().visit(visitor); + } + if (aggregations() != null && aggregations().factories() != null) { + decider.evaluateForAggregations(aggregations().factories()); + } + boolean result = decider.shouldUseIntraSegmentSearch(); + logger.debug( + "partition strategy decision: strategy={}, useIntraSegment={}, reason={}", + partitionStrategy, + result, + decider.getReason() + ); + requestShouldUseIntraSegmentSearch.set(result); + } } diff 
--git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 33f36951fc411..7b694c24d7194 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -341,6 +341,39 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.Dynamic, Property.NodeScope ); + + // Partition strategy constants + public static final String CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT = "segment"; + public static final String CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_BALANCED = "balanced"; + public static final String CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_FORCE = "force"; + + // Partition strategy setting + public static final Setting CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY = Setting.simpleString( + "search.concurrent_segment_search.partition_strategy", + CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_BALANCED, + value -> { + switch (value) { + case CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_SEGMENT: + case CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_BALANCED: + case CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_FORCE: + break; + default: + throw new IllegalArgumentException("Setting value must be one of [segment, balanced, force]"); + } + }, + Property.Dynamic, + Property.NodeScope + ); + + // Minimum segment size for balanced partitioning (only applies when partition_strategy = balanced) + public static final Setting CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE = Setting.intSetting( + "search.concurrent_segment_search.partition_min_segment_size", + 500_000, + 1000, + Property.Dynamic, + Property.NodeScope + ); + // value 0 means rewrite filters optimization in aggregations will be disabled @ExperimentalApi public static final Setting MAX_AGGREGATION_REWRITE_FILTERS = Setting.intSetting( @@ -1532,6 +1565,7 @@ private void parseSource(DefaultSearchContext 
context, SearchSourceBuilder sourc // nothing to parse... if (source == null) { context.evaluateRequestShouldUseConcurrentSearch(); + context.evaluateRequestShouldUseIntraSegmentSearch(); return; } @@ -1723,6 +1757,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.collapse(collapseContext); } context.evaluateRequestShouldUseConcurrentSearch(); + context.evaluateRequestShouldUseIntraSegmentSearch(); if (source.profile()) { final Function>> pluginProfileMetricsSupplier = (query) -> pluginProfilers.stream() .flatMap(p -> p.getQueryProfileMetrics(context, query).stream()) diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java index 8b35a3bbfe8d7..65c9eafbbe328 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java @@ -274,6 +274,15 @@ public boolean allFactoriesSupportConcurrentSearch() { return true; } + public boolean allFactoriesSupportIntraSegmentSearch() { + for (AggregatorFactory factory : factories) { + if (factory.supportsIntraSegmentSearch() == false || factory.evaluateChildFactoriesForIntraSegment() == false) { + return false; + } + } + return true; + } + /** * Create all aggregators so that they can be consumed with multiple * buckets. 
diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java index 86fbb46a9ad3c..500f7c7145bab 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java @@ -124,10 +124,22 @@ protected boolean supportsConcurrentSegmentSearch() { return false; } + /** + * Implementation should override this method and return true if the Aggregator benefits from intra-segment search + * Default is false - aggregations must explicitly opt-in + */ + protected boolean supportsIntraSegmentSearch() { + return false; + } + public boolean evaluateChildFactories() { return factories.allFactoriesSupportConcurrentSearch(); } + public boolean evaluateChildFactoriesForIntraSegment() { + return factories.allFactoriesSupportIntraSegmentSearch(); + } + public AggregatorFactories getSubFactories() { return factories; } diff --git a/server/src/main/java/org/opensearch/search/deciders/IntraSegmentSearchDecider.java b/server/src/main/java/org/opensearch/search/deciders/IntraSegmentSearchDecider.java new file mode 100644 index 0000000000000..3b37664f3e716 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/deciders/IntraSegmentSearchDecider.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.deciders; + +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.search.aggregations.AggregatorFactories; + +/** + * Evaluates whether queries and aggregations support intra-segment search. 
+ */ +public class IntraSegmentSearchDecider { + + private boolean querySupport = true; + private boolean aggSupport = true; + private String reason = "no query or aggregation evaluated"; + private boolean hasQuery = false; + private boolean hasAggregations = false; + + public void evaluateForQuery(QueryBuilder queryBuilder) { + hasQuery = true; + if (queryBuilder.supportsIntraSegmentSearch() == false) { + querySupport = false; + reason = queryBuilder.getName() + " does not support intra-segment search"; + } + } + + public void evaluateForAggregations(AggregatorFactories aggregations) { + if (aggregations == null) { + return; + } + hasAggregations = true; + if (aggregations.allFactoriesSupportIntraSegmentSearch() == false) { + aggSupport = false; + reason = "some aggregations do not support intra-segment search"; + } + } + + public boolean shouldUseIntraSegmentSearch() { + if (hasQuery && querySupport == false) { + return false; + } + if (hasAggregations && aggSupport == false) { + return false; + } + return hasQuery || hasAggregations; + } + + public String getReason() { + if (shouldUseIntraSegmentSearch()) { + return "query/aggregations support intra-segment search"; + } + return reason; + } +} diff --git a/server/src/main/java/org/opensearch/search/deciders/IntraSegmentSearchVisitor.java b/server/src/main/java/org/opensearch/search/deciders/IntraSegmentSearchVisitor.java new file mode 100644 index 0000000000000..db07494f3c5ac --- /dev/null +++ b/server/src/main/java/org/opensearch/search/deciders/IntraSegmentSearchVisitor.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.deciders; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; + +/** + * Visitor to traverse QueryBuilder tree and invoke IntraSegmentSearchDecider + * for each query node. + */ +public class IntraSegmentSearchVisitor implements QueryBuilderVisitor { + + private final IntraSegmentSearchDecider decider; + + public IntraSegmentSearchVisitor(IntraSegmentSearchDecider decider) { + this.decider = decider; + } + + @Override + public void accept(QueryBuilder qb) { + decider.evaluateForQuery(qb); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return this; + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index 0a3d894928121..07b3fb3d3d739 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -577,7 +577,24 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio */ @Override protected LeafSlice[] slices(List leaves) { - return slicesInternal(leaves, searchContext.getTargetMaxSliceCount()); + if (leaves == null || leaves.isEmpty()) { + return new LeafSlice[0]; + } + int targetMaxSlice = searchContext.getTargetMaxSliceCount(); + if (targetMaxSlice == 0) { + LeafSlice[] leafSlices = super.slices(leaves); + logger.debug("Slice count using lucene default [{}]", leafSlices.length); + return leafSlices; + } + LeafSlice[] leafSlices = MaxTargetSliceSupplier.getSlices( + leaves, + targetMaxSlice, + searchContext.shouldUseIntraSegmentSearch(), + searchContext.getPartitionStrategy(), + searchContext.getPartitionMinSegmentSize() + ); + logger.debug("Slice count using max target slice supplier [{}]", leafSlices.length); + 
return leafSlices; } public DirectoryReader getDirectoryReader() { @@ -645,19 +662,4 @@ private boolean canMatchSearchAfter(LeafReaderContext ctx) throws IOException { } return true; } - - // package-private for testing - LeafSlice[] slicesInternal(List leaves, int targetMaxSlice) { - LeafSlice[] leafSlices; - if (targetMaxSlice == 0) { - // use the default lucene slice calculation - leafSlices = super.slices(leaves); - logger.debug("Slice count using lucene default [{}]", leafSlices.length); - } else { - // use the custom slice calculation based on targetMaxSlice - leafSlices = MaxTargetSliceSupplier.getSlices(leaves, targetMaxSlice); - logger.debug("Slice count using max target slice supplier [{}]", leafSlices.length); - } - return leafSlices; - } } diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index 1660e5ac8343f..36e4dbcf15905 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -587,4 +587,19 @@ public boolean shouldUseTimeSeriesDescSortOptimization() { public boolean getStarTreeIndexEnabled() { return in.getStarTreeIndexEnabled(); } + + @Override + public String getPartitionStrategy() { + return in.getPartitionStrategy(); + } + + @Override + public int getPartitionMinSegmentSize() { + return in.getPartitionMinSegmentSize(); + } + + @Override + public boolean shouldUseIntraSegmentSearch() { + return in.shouldUseIntraSegmentSearch(); + } } diff --git a/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java b/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java index 51ebdc68ba099..f70fa4f315d97 100644 --- a/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java +++ 
b/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java @@ -10,12 +10,16 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.IndexSearcher.LeafReaderContextPartition; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; +import java.util.HashSet; import java.util.List; -import java.util.PriorityQueue; +import java.util.Set; + +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_FORCE; /** * Supplier to compute leaf slices based on passed in leaves and max target slice count to limit the number of computed slices. It sorts @@ -27,52 +31,163 @@ */ final class MaxTargetSliceSupplier { - static IndexSearcher.LeafSlice[] getSlices(List leaves, int targetMaxSlice) { + static IndexSearcher.LeafSlice[] getSlices( + List leaves, + int targetMaxSlice, + boolean useIntraSegmentSearch, + String partitionStrategy, + int minSegmentSize + ) { if (targetMaxSlice <= 0) { throw new IllegalArgumentException("MaxTargetSliceSupplier called with unexpected slice count of " + targetMaxSlice); } + if (leaves.isEmpty()) { + return new IndexSearcher.LeafSlice[0]; + } + if (useIntraSegmentSearch == false) { + return getSlicesWholeSegments(leaves, targetMaxSlice); + } else if (CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY_FORCE.equals(partitionStrategy)) { + return getSlicesWithForcePartitioning(leaves, targetMaxSlice); + } else { + return getSlicesWithAutoPartitioning(leaves, targetMaxSlice, minSegmentSize); + } + } - // slice count should not exceed the segment count - int targetSliceCount = Math.min(targetMaxSlice, leaves.size()); + /** + * Original method for whole segments + */ + static IndexSearcher.LeafSlice[] getSlicesWholeSegments(List leaves, int targetMaxSlice) { + List partitions = new ArrayList<>(leaves.size()); + for (LeafReaderContext leaf : leaves) { + 
partitions.add(LeafReaderContextPartition.createForEntireSegment(leaf)); + } + return distributePartitions(partitions, targetMaxSlice); + } - // Make a copy so we can sort: - List sortedLeaves = new ArrayList<>(leaves); + /** + * Balanced partitioning - partition segments exceeding fair slice share and min segment size. + */ + static IndexSearcher.LeafSlice[] getSlicesWithAutoPartitioning(List leaves, int targetMaxSlice, int minSegmentSize) { + long totalDocs = 0; + for (LeafReaderContext leaf : leaves) { + totalDocs += leaf.reader().maxDoc(); + } + long maxDocsPerPartition = (totalDocs + targetMaxSlice - 1) / targetMaxSlice; + List partitions = new ArrayList<>(Math.min(leaves.size() * 2, targetMaxSlice * 2)); + for (LeafReaderContext leaf : leaves) { + int segmentSize = leaf.reader().maxDoc(); + if (segmentSize > maxDocsPerPartition && segmentSize >= minSegmentSize) { + int numPartitions = (int) ((segmentSize + maxDocsPerPartition - 1) / maxDocsPerPartition); + addPartitions(partitions, leaf, Math.min(numPartitions, targetMaxSlice)); + } else { + partitions.add(LeafReaderContextPartition.createForEntireSegment(leaf)); + } + } + return distributePartitions(partitions, targetMaxSlice); + } - // Sort by maxDoc, descending: - sortedLeaves.sort(Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc()))); + /** + * Force partitioning - partition EVERY segment into available slices. + * Each segment is split into targetMaxSlice partitions regardless of size. 
+ */ + static IndexSearcher.LeafSlice[] getSlicesWithForcePartitioning(List leaves, int targetMaxSlice) { + List partitions = new ArrayList<>(leaves.size() * targetMaxSlice); + for (LeafReaderContext leaf : leaves) { + int numPartitions = Math.min(targetMaxSlice, leaf.reader().maxDoc()); + addPartitions(partitions, leaf, numPartitions); + } + return distributePartitions(partitions, targetMaxSlice); + } - final List> groupedLeaves = new ArrayList<>(targetSliceCount); - for (int i = 0; i < targetSliceCount; ++i) { - groupedLeaves.add(new ArrayList<>()); + /** + * Creates partitions for a segment and adds them to the list. + */ + private static void addPartitions(List partitions, LeafReaderContext leaf, int numPartitions) { + int segmentSize = leaf.reader().maxDoc(); + if (numPartitions > 1) { + int docsPerPartition = segmentSize / numPartitions; + for (int i = 0; i < numPartitions; i++) { + int startDoc = i * docsPerPartition; + int endDoc = (i == numPartitions - 1) ? segmentSize : startDoc + docsPerPartition; + partitions.add(LeafReaderContextPartition.createFromAndTo(leaf, startDoc, endDoc)); + } + } else { + partitions.add(LeafReaderContextPartition.createForEntireSegment(leaf)); } + } - PriorityQueue groupQueue = new PriorityQueue<>(); - for (int i = 0; i < targetSliceCount; i++) { - groupQueue.offer(new Group(i)); + /** + * Distribute partitions using LPT algorithm while respecting Lucene's constraint + * that same-segment partitions must be in different slices. 
+ */ + static IndexSearcher.LeafSlice[] distributePartitions(List partitions, int targetMaxSlice) { + if (partitions.isEmpty()) { + return new IndexSearcher.LeafSlice[0]; + } + int sliceCount = Math.min(targetMaxSlice, partitions.size()); + // Sort partitions by doc count descending + partitions.sort(Collections.reverseOrder(Comparator.comparingInt(MaxTargetSliceSupplier::getPartitionDocCount))); + GroupWithSegmentTracking[] slices = new GroupWithSegmentTracking[sliceCount]; + for (int i = 0; i < sliceCount; i++) { + slices[i] = new GroupWithSegmentTracking(i); } - Group minGroup; - for (int i = 0; i < sortedLeaves.size(); ++i) { - minGroup = groupQueue.poll(); - groupedLeaves.get(minGroup.index).add(IndexSearcher.LeafReaderContextPartition.createForEntireSegment(sortedLeaves.get(i))); - minGroup.sum += sortedLeaves.get(i).reader().maxDoc(); - groupQueue.offer(minGroup); + for (LeafReaderContextPartition partition : partitions) { + int segmentOrd = partition.ctx.ord; + int docCount = getPartitionDocCount(partition); + // Find slice with minimum load that doesn't have this segment + GroupWithSegmentTracking targetSlice = null; + long minLoad = Long.MAX_VALUE; + for (GroupWithSegmentTracking slice : slices) { + if (slice.hasSegment(segmentOrd) == false && slice.docCountSum < minLoad) { + minLoad = slice.docCountSum; + targetSlice = slice; + } + } + targetSlice.addPartition(partition, docCount); } + // Collect non-empty slices + List result = new ArrayList<>(sliceCount); + for (GroupWithSegmentTracking slice : slices) { + if (slice.partitions.isEmpty() == false) { + result.add(new IndexSearcher.LeafSlice(slice.partitions)); + } + } + return result.toArray(new IndexSearcher.LeafSlice[0]); + } - return groupedLeaves.stream().map(IndexSearcher.LeafSlice::new).toArray(IndexSearcher.LeafSlice[]::new); + private static int getPartitionDocCount(LeafReaderContextPartition partition) { + if (partition.maxDocId == Integer.MAX_VALUE) { + return partition.ctx.reader().maxDoc(); + 
} + return partition.maxDocId - partition.minDocId; } - static class Group implements Comparable { + static class GroupWithSegmentTracking implements Comparable { final int index; - int sum; + long docCountSum; + final Set segmentOrdinals; + final List partitions; - public Group(int index) { + public GroupWithSegmentTracking(int index) { this.index = index; - this.sum = 0; + this.docCountSum = 0; + this.segmentOrdinals = new HashSet<>(); + this.partitions = new ArrayList<>(); + } + + public boolean hasSegment(int segmentOrd) { + return segmentOrdinals.contains(segmentOrd); + } + + public void addPartition(LeafReaderContextPartition partition, long docCount) { + this.partitions.add(partition); + this.segmentOrdinals.add(partition.ctx.ord); + this.docCountSum += docCount; } @Override - public int compareTo(Group other) { - return Integer.compare(this.sum, other.sum); + public int compareTo(GroupWithSegmentTracking other) { + return Long.compare(this.docCountSum, other.docCountSum); } } } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index e0e18daa2cc29..61c0b7bcb663b 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -43,6 +43,7 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.index.cache.bitset.BitsetFilterCache; @@ -92,6 +93,9 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE; +import static 
org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY; + /** * This class encapsulates the state needed to execute a search. It holds a reference to the * shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on @@ -599,4 +603,18 @@ public boolean setFlushModeIfAbsent(FlushMode flushMode) { return false; } + public String getPartitionStrategy() { + return CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getDefault(Settings.EMPTY); + } + + public int getPartitionMinSegmentSize() { + return CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.getDefault(Settings.EMPTY); + } + + /** + * Evaluates whether this request should use intra-segment search based on query and aggregation analysis. + */ + public boolean shouldUseIntraSegmentSearch() { + return false; + } } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 5ccf3dd47217a..5fb949cdbbaa2 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -58,6 +58,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import static org.opensearch.index.IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE; +import static org.opensearch.index.IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; @@ -1173,4 +1175,36 @@ public void testPeriodicFlushIntervalDynamicUpdate() { ); assertEquals(TimeValue.MINUS_ONE, settings.getPeriodicFlushInterval()); } + + public void testPartitionStrategyDefault() { + IndexMetadata metadata = newIndexMeta("index", Settings.builder().build()); + IndexSettings settings = newIndexSettings(metadata, Settings.EMPTY); 
+ assertEquals("segment", INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.get(settings.getSettings())); + } + + public void testPartitionStrategyValidValues() { + for (String strategy : new String[] { "segment", "balanced", "force" }) { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), strategy).build() + ); + IndexSettings settings = newIndexSettings(metadata, Settings.EMPTY); + assertEquals(strategy, INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.get(settings.getSettings())); + } + } + + public void testPartitionMinSegmentSizeDefault() { + IndexMetadata metadata = newIndexMeta("index", Settings.builder().build()); + IndexSettings settings = newIndexSettings(metadata, Settings.EMPTY); + assertEquals(500_000, (int) INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.get(settings.getSettings())); + } + + public void testPartitionMinSegmentSizeCustom() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.getKey(), 100_000).build() + ); + IndexSettings settings = newIndexSettings(metadata, Settings.EMPTY); + assertEquals(100_000, (int) INDEX_CONCURRENT_SEGMENT_SEARCH_PARTITION_MIN_SEGMENT_SIZE.get(settings.getSettings())); + } } diff --git a/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java index fdbb2ef43aa8e..76455e48da1bb 100644 --- a/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java @@ -108,4 +108,9 @@ protected NamedXContentRegistry xContentRegistry() { return xContentRegistry; } + public void testDefaultSupportsIntraSegmentSearch() { + QueryBuilder query = new MatchAllQueryBuilder(); + assertFalse(query.supportsIntraSegmentSearch()); + } + } diff --git 
a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 853cd23885f73..b6c236654569a 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -70,6 +70,7 @@ import org.opensearch.index.query.AbstractQueryBuilder; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ParsedQuery; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; import org.opensearch.search.aggregations.AggregatorFactories; @@ -894,6 +895,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING); clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE); + clusterSettings.registerSetting(SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY); clusterSettings.applySettings( Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), "auto").build() ); @@ -1168,6 +1170,105 @@ public Optional create(IndexSettings indexSettin } assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // Case8: no aggregations, query supports intra-segment search, partition strategy is balanced (default) + // should use concurrent search via intra-segment search path + when(decider1.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO_OP, "noop") + ); + when(decider2.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO_OP, "noop") + ); 
+
+        SearchSourceBuilder intraSegmentSourceBuilder = new SearchSourceBuilder();
+        QueryBuilder intraSegmentQuery = mock(QueryBuilder.class);
+        when(intraSegmentQuery.supportsIntraSegmentSearch()).thenReturn(true);
+        intraSegmentSourceBuilder.query(intraSegmentQuery);
+        when(shardSearchRequest.source()).thenReturn(intraSegmentSourceBuilder);
+
+        // Apply balanced partition strategy
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), "auto")
+                .put(SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), "balanced")
+                .build()
+        );
+
+        context = new DefaultSearchContext(
+            readerContext,
+            shardSearchRequest,
+            target,
+            clusterService,
+            bigArrays,
+            null,
+            null,
+            null,
+            false,
+            Version.CURRENT,
+            false,
+            executor,
+            null,
+            concurrentSearchRequestDeciders
+        );
+        // No aggregations set
+        context.evaluateRequestShouldUseConcurrentSearch();
+        if (executor == null) {
+            assertFalse(context.shouldUseConcurrentSearch());
+        } else {
+            assertTrue(context.shouldUseConcurrentSearch());
+        }
+
+        // Case9: no aggregations, query does NOT support intra-segment search
+        // should NOT use concurrent search
+        when(intraSegmentQuery.supportsIntraSegmentSearch()).thenReturn(false);
+
+        context = new DefaultSearchContext(
+            readerContext,
+            shardSearchRequest,
+            target,
+            clusterService,
+            bigArrays,
+            null,
+            null,
+            null,
+            false,
+            Version.CURRENT,
+            false,
+            executor,
+            null,
+            concurrentSearchRequestDeciders
+        );
+        context.evaluateRequestShouldUseConcurrentSearch();
+        assertFalse(context.shouldUseConcurrentSearch());
+
+        // Case10: query supports intra-segment search with partition strategy as "segment" (whole segments, no intra-segment partitioning)
+        // should NOT use concurrent search
+        when(intraSegmentQuery.supportsIntraSegmentSearch()).thenReturn(true);
+        clusterSettings.applySettings(
+            Settings.builder()
+                .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), "auto")
+
.put(SearchService.CONCURRENT_SEGMENT_SEARCH_PARTITION_STRATEGY.getKey(), "segment") + .build() + ); + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + concurrentSearchRequestDeciders + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + // shutdown the threadpool threadPool.shutdown(); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java index a5724d3c34352..0afe663a85782 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java @@ -336,6 +336,11 @@ public void testBuildPipelineTreeResolvesPipelineOrder() { assertThat(tree.aggregators().stream().map(PipelineAggregator::name).collect(toList()), equalTo(Arrays.asList("foo", "bar"))); } + public void testAllFactoriesSupportIntraSegmentSearchEmpty() { + AggregatorFactories factories = AggregatorFactories.EMPTY; + assertTrue(factories.allFactoriesSupportIntraSegmentSearch()); + } + @Override protected NamedXContentRegistry xContentRegistry() { return xContentRegistry; diff --git a/server/src/test/java/org/opensearch/search/deciders/IntraSegmentSearchDeciderTests.java b/server/src/test/java/org/opensearch/search/deciders/IntraSegmentSearchDeciderTests.java new file mode 100644 index 0000000000000..a0563800fe6f9 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/deciders/IntraSegmentSearchDeciderTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.deciders; + +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IntraSegmentSearchDeciderTests extends OpenSearchTestCase { + + public void testQuerySupportsIntraSegment() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + QueryBuilder query = mock(QueryBuilder.class); + when(query.supportsIntraSegmentSearch()).thenReturn(true); + decider.evaluateForQuery(query); + assertTrue(decider.shouldUseIntraSegmentSearch()); + assertEquals("query/aggregations support intra-segment search", decider.getReason()); + } + + public void testQueryDoesNotSupportIntraSegment() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + QueryBuilder query = mock(QueryBuilder.class); + when(query.supportsIntraSegmentSearch()).thenReturn(false); + when(query.getName()).thenReturn("test_query"); + decider.evaluateForQuery(query); + assertFalse(decider.shouldUseIntraSegmentSearch()); + assertEquals("test_query does not support intra-segment search", decider.getReason()); + } + + public void testAggregationsSupportsIntraSegment() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + AggregatorFactories aggs = mock(AggregatorFactories.class); + when(aggs.allFactoriesSupportIntraSegmentSearch()).thenReturn(true); + decider.evaluateForAggregations(aggs); + assertTrue(decider.shouldUseIntraSegmentSearch()); + assertEquals("query/aggregations support intra-segment search", decider.getReason()); + } + + public void testAggregationsDoesNotSupportIntraSegment() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + AggregatorFactories aggs = mock(AggregatorFactories.class); + when(aggs.allFactoriesSupportIntraSegmentSearch()).thenReturn(false); + 
decider.evaluateForAggregations(aggs); + assertFalse(decider.shouldUseIntraSegmentSearch()); + assertEquals("some aggregations do not support intra-segment search", decider.getReason()); + } + + public void testQueryNoVetoesAggregationYes() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + QueryBuilder query = mock(QueryBuilder.class); + when(query.supportsIntraSegmentSearch()).thenReturn(false); + when(query.getName()).thenReturn("test_query"); + decider.evaluateForQuery(query); + AggregatorFactories aggs = mock(AggregatorFactories.class); + when(aggs.allFactoriesSupportIntraSegmentSearch()).thenReturn(true); + decider.evaluateForAggregations(aggs); + assertFalse(decider.shouldUseIntraSegmentSearch()); + assertEquals("test_query does not support intra-segment search", decider.getReason()); + } + + public void testQueryYesAggregationNo() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + QueryBuilder query = mock(QueryBuilder.class); + when(query.supportsIntraSegmentSearch()).thenReturn(true); + decider.evaluateForQuery(query); + AggregatorFactories aggs = mock(AggregatorFactories.class); + when(aggs.allFactoriesSupportIntraSegmentSearch()).thenReturn(false); + decider.evaluateForAggregations(aggs); + assertFalse(decider.shouldUseIntraSegmentSearch()); + assertEquals("some aggregations do not support intra-segment search", decider.getReason()); + } + + public void testBothSupport() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + QueryBuilder query = mock(QueryBuilder.class); + when(query.supportsIntraSegmentSearch()).thenReturn(true); + decider.evaluateForQuery(query); + AggregatorFactories aggs = mock(AggregatorFactories.class); + when(aggs.allFactoriesSupportIntraSegmentSearch()).thenReturn(true); + decider.evaluateForAggregations(aggs); + assertTrue(decider.shouldUseIntraSegmentSearch()); + assertEquals("query/aggregations support intra-segment search", decider.getReason()); + } + + 
public void testNoQueryNoAggregations() { + IntraSegmentSearchDecider decider = new IntraSegmentSearchDecider(); + assertFalse(decider.shouldUseIntraSegmentSearch()); + assertEquals("no query or aggregation evaluated", decider.getReason()); + } +} diff --git a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java index e3e56455ad09b..e806fe764c0a2 100644 --- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java @@ -82,7 +82,6 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.lucene.util.CombinedBitSet; -import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.metrics.InternalSum; @@ -319,58 +318,25 @@ public void onRemoval(ShardId shardId, Accountable accountable) { IOUtils.close(reader, w, dir); } - public void testSlicesInternal() throws Exception { + public void testSlicesWithMaxTargetSliceSupplier() throws Exception { final List leaves = getLeaves(10); - try ( - final Directory directory = newDirectory(); - IndexWriter iw = new IndexWriter( - directory, - new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) - ) - ) { - Document document = new Document(); - document.add(new StringField("field1", "value1", Field.Store.NO)); - document.add(new StringField("field2", "value1", Field.Store.NO)); - iw.addDocument(document); - iw.commit(); - try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { - SearchContext searchContext = mock(SearchContext.class); - IndexShard indexShard = mock(IndexShard.class); - 
when(searchContext.indexShard()).thenReturn(indexShard); - when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); - ContextIndexSearcher searcher = new ContextIndexSearcher( - directoryReader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), - true, - null, - searchContext - ); - // Case 1: Verify the slice count when lucene default slice computation is used - IndexSearcher.LeafSlice[] slices = searcher.slicesInternal( - leaves, - SearchService.CONCURRENT_SEGMENT_SEARCH_MIN_SLICE_COUNT_VALUE - ); - int expectedSliceCount = 2; - // 2 slices will be created since max segment per slice of 5 will be reached - assertEquals(expectedSliceCount, slices.length); - for (int i = 0; i < expectedSliceCount; ++i) { - assertEquals(5, slices[i].partitions.length); - } - - // Case 2: Verify the slice count when custom max slice computation is used - expectedSliceCount = 4; - slices = searcher.slicesInternal(leaves, expectedSliceCount); - - // 4 slices will be created with 3 leaves in first&last slices and 2 leaves in other slices - assertEquals(expectedSliceCount, slices.length); - assertEquals(3, slices[0].partitions.length); - assertEquals(2, slices[1].partitions.length); - assertEquals(2, slices[2].partitions.length); - assertEquals(3, slices[3].partitions.length); - } - } + int expectedSliceCount = 4; + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWholeSegments(leaves, expectedSliceCount); + assertEquals(expectedSliceCount, slices.length); + int totalPartitions = 0; + for (IndexSearcher.LeafSlice slice : slices) { + totalPartitions += slice.partitions.length; + } + assertEquals(3, slices[0].partitions.length); + assertEquals(3, slices[1].partitions.length); + assertEquals(2, slices[2].partitions.length); + assertEquals(2, slices[3].partitions.length); + assertEquals(10, totalPartitions); + expectedSliceCount = 2; + slices = 
MaxTargetSliceSupplier.getSlicesWholeSegments(leaves, expectedSliceCount); + assertEquals(expectedSliceCount, slices.length); + assertEquals(5, slices[0].partitions.length); + assertEquals(5, slices[1].partitions.length); } public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { @@ -423,9 +389,9 @@ public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { // 4 slices will be created with 3 leaves in first&last slices and 2 leaves in other slices assertEquals(expectedSliceCount, slices.length); assertEquals(3, slices[0].partitions.length); - assertEquals(2, slices[1].partitions.length); + assertEquals(3, slices[1].partitions.length); assertEquals(2, slices[2].partitions.length); - assertEquals(3, slices[3].partitions.length); + assertEquals(2, slices[3].partitions.length); } } } diff --git a/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java b/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java index 96a03540566c2..9d4bf7d2d20eb 100644 --- a/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java +++ b/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java @@ -30,8 +30,7 @@ public class MaxTargetSliceSupplierTests extends OpenSearchTestCase { public void testSliceCountGreaterThanLeafCount() throws Exception { int expectedSliceCount = 2; - IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(getLeaves(expectedSliceCount), 5); - // verify slice count is same as leaf count + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWholeSegments(getLeaves(expectedSliceCount), 5); assertEquals(expectedSliceCount, slices.length); for (int i = 0; i < expectedSliceCount; ++i) { assertEquals(1, slices[i].partitions.length); @@ -39,12 +38,30 @@ public void testSliceCountGreaterThanLeafCount() throws Exception { } public void testNegativeSliceCount() { - 
assertThrows(IllegalArgumentException.class, () -> MaxTargetSliceSupplier.getSlices(new ArrayList<>(), randomIntBetween(-3, 0))); + assertThrows( + IllegalArgumentException.class, + () -> MaxTargetSliceSupplier.getSlices(new ArrayList<>(), randomIntBetween(-3, 0), true, "force", 100) + ); + } + + public void testEmptyLeavesForcePartitioning() { + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWithForcePartitioning(new ArrayList<>(), 4); + assertEquals(0, slices.length); + } + + public void testEmptyLeavesAutoPartitioning() { + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWithAutoPartitioning(new ArrayList<>(), 4, 100); + assertEquals(0, slices.length); + } + + public void testDistributePartitionsEmpty() { + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.distributePartitions(new ArrayList<>(), 4); + assertEquals(0, slices.length); } public void testSingleSliceWithMultipleLeaves() throws Exception { int leafCount = randomIntBetween(1, 10); - IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(getLeaves(leafCount), 1); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWholeSegments(getLeaves(leafCount), 1); assertEquals(1, slices.length); assertEquals(leafCount, slices[0].partitions.length); } @@ -52,23 +69,19 @@ public void testSingleSliceWithMultipleLeaves() throws Exception { public void testSliceCountLessThanLeafCount() throws Exception { int leafCount = 12; List leaves = getLeaves(leafCount); - // Case 1: test with equal number of leaves per slice int expectedSliceCount = 3; - IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, expectedSliceCount); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWholeSegments(leaves, expectedSliceCount); int expectedLeavesPerSlice = leafCount / expectedSliceCount; - assertEquals(expectedSliceCount, slices.length); for (int i = 0; i < expectedSliceCount; ++i) { assertEquals(expectedLeavesPerSlice, 
slices[i].partitions.length); } - // Case 2: test with first 2 slice more leaves than others expectedSliceCount = 5; - slices = MaxTargetSliceSupplier.getSlices(leaves, expectedSliceCount); + slices = MaxTargetSliceSupplier.getSlicesWholeSegments(leaves, expectedSliceCount); int expectedLeavesInFirst2Slice = 3; int expectedLeavesInOtherSlice = 2; - assertEquals(expectedSliceCount, slices.length); for (int i = 0; i < expectedSliceCount; ++i) { if (i < 2) { @@ -80,7 +93,7 @@ public void testSliceCountLessThanLeafCount() throws Exception { } public void testEmptyLeaves() { - IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(new ArrayList<>(), 2); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWholeSegments(new ArrayList<>(), 2); assertEquals(0, slices.length); } @@ -111,17 +124,277 @@ public void testOptimizedGroup() throws Exception { iw.addDocument(document); } iw.commit(); - try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { List leaves = directoryReader.leaves(); assertEquals(3, leaves.size()); - IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, 2); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWholeSegments(leaves, 2); + assertEquals(2, slices.length); assertEquals(1, slices[0].partitions.length); assertEquals(3, slices[0].getMaxDocs()); - assertEquals(2, slices[1].partitions.length); assertEquals(2, slices[1].getMaxDocs()); } } } + + public void testForcePartitioningSingleSegment() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < 100; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = 
DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + assertEquals(1, leaves.size()); + // 1 segment of 100 docs → 4 partitions of 25 docs each + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWithForcePartitioning(leaves, 4); + assertEquals(4, slices.length); + // Each slice has 1 partition with 25 docs + assertEquals(1, slices[0].partitions.length); + assertEquals(25, slices[0].getMaxDocs()); + assertEquals(1, slices[1].partitions.length); + assertEquals(25, slices[1].getMaxDocs()); + assertEquals(1, slices[2].partitions.length); + assertEquals(25, slices[2].getMaxDocs()); + assertEquals(1, slices[3].partitions.length); + assertEquals(25, slices[3].getMaxDocs()); + } + } + } + + public void testForcePartitioningMultipleSegments() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + // Create 2 segments with 100 docs each + for (int i = 0; i < 100; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + for (int i = 0; i < 100; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + assertEquals(2, leaves.size()); + // Force partitions each segment into 4 partitions = 8 total, distributed to 4 slices + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWithForcePartitioning(leaves, 4); + assertEquals(4, slices.length); + assertEquals(2, slices[0].partitions.length); + assertEquals(50, slices[0].getMaxDocs()); + assertEquals(2, slices[1].partitions.length); + assertEquals(50, slices[1].getMaxDocs()); + 
assertEquals(2, slices[2].partitions.length); + assertEquals(50, slices[2].getMaxDocs()); + assertEquals(2, slices[3].partitions.length); + assertEquals(50, slices[3].getMaxDocs()); + } + } + } + + public void testBalancedPartitioningLargeSegment() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < 1000; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + assertEquals(1, leaves.size()); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWithAutoPartitioning(leaves, 4, 100); + assertEquals(4, slices.length); + assertEquals(1, slices[0].partitions.length); + assertEquals(250, slices[0].getMaxDocs()); + assertEquals(1, slices[1].partitions.length); + assertEquals(250, slices[1].getMaxDocs()); + assertEquals(1, slices[2].partitions.length); + assertEquals(250, slices[2].getMaxDocs()); + assertEquals(1, slices[3].partitions.length); + assertEquals(250, slices[3].getMaxDocs()); + } + } + } + + public void testBalancedPartitioningSmallSegmentSkipped() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < 50; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + assertEquals(1, leaves.size()); + IndexSearcher.LeafSlice[] slices 
= MaxTargetSliceSupplier.getSlicesWithAutoPartitioning(leaves, 4, 100); + assertEquals(1, slices.length); + assertEquals(1, slices[0].partitions.length); + } + } + } + + public void testGetSlicesWithNoneStrategy() throws Exception { + List leaves = getLeaves(4); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, 2, false, "segment", 100); + assertEquals(2, slices.length); + } + + public void testGetSlicesWithForceStrategy() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < 100; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, 4, true, "force", 100); + assertEquals(4, slices.length); + assertEquals(1, slices[0].partitions.length); + assertEquals(25, slices[0].getMaxDocs()); + assertEquals(1, slices[1].partitions.length); + assertEquals(25, slices[1].getMaxDocs()); + assertEquals(1, slices[2].partitions.length); + assertEquals(25, slices[2].getMaxDocs()); + assertEquals(1, slices[3].partitions.length); + assertEquals(25, slices[3].getMaxDocs()); + } + } + } + + public void testGetSlicesWithBalancedStrategy() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + // Create 1 large segment (1000 docs) that exceeds minSegmentSize + for (int i = 0; i < 1000; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + 
iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, 4, true, "balanced", 100); + assertEquals(4, slices.length); + assertEquals(1, slices[0].partitions.length); + assertEquals(250, slices[0].getMaxDocs()); + assertEquals(1, slices[1].partitions.length); + assertEquals(250, slices[1].getMaxDocs()); + assertEquals(1, slices[2].partitions.length); + assertEquals(250, slices[2].getMaxDocs()); + assertEquals(1, slices[3].partitions.length); + assertEquals(250, slices[3].getMaxDocs()); + } + } + } + + public void testGetSlicesWithBalancedStrategyMultipleSegments() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + // Create 1 large segment (400 docs) and 2 small segments (50 docs each) + for (int i = 0; i < 400; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + for (int i = 0; i < 50; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + for (int i = 0; i < 50; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + assertEquals(3, leaves.size()); + // minSegmentSize=100 → only large segment (400 docs) gets partitioned into 2 + // Small segments (50 docs each) stay whole + // Total partitions = 2 (from large) + 2 (small whole) = 4, distributed to 2 slices + 
IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, 2, true, "balanced", 100); + assertEquals(2, slices.length); + assertEquals(2, slices[0].partitions.length); + assertEquals(250, slices[0].getMaxDocs()); + assertEquals(2, slices[1].partitions.length); + assertEquals(250, slices[1].getMaxDocs()); + } + } + } + + public void testSameSegmentPartitionsInDifferentSlices() throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < 100; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value", Field.Store.NO)); + iw.addDocument(document); + } + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlicesWithForcePartitioning(leaves, 4); + for (IndexSearcher.LeafSlice slice : slices) { + assertEquals(1, slice.partitions.length); + } + } + } + } } diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index f03cbe266df86..d863cabf16673 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -718,6 +718,11 @@ public boolean getStarTreeIndexEnabled() { return indexService != null && indexService.getIndexSettings().getStarTreeIndexEnabled(); } + @Override + public boolean shouldUseIntraSegmentSearch() { + return false; + } + /** * Clean the query results by consuming all of it */