From 0ca4ed0b62ecc699547a8fbedd150f28f406b03a Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Thu, 22 Aug 2024 12:33:52 +0530 Subject: [PATCH 01/17] Add log when download completes with file size (#15224) Signed-off-by: Gaurav Bafna --- .../org/opensearch/index/store/RemoteStoreFileDownloader.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java index 134994c0ae32c..ad42b6d677b41 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -149,6 +149,7 @@ private void copyOneFile( try { cancellableThreads.executeIO(() -> { destination.copyFrom(source, file, file, IOContext.DEFAULT); + logger.trace("Downloaded file {} of size {}", file, destination.fileLength(file)); onFileCompletion.run(); if (secondDestination != null) { secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); From abb1041c10c81964c7e0a93ddbaa714a2643bff2 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 22 Aug 2024 12:18:41 -0400 Subject: [PATCH 02/17] Support Filtering on Large List encoded by Bitmap (version update) (#15352) Signed-off-by: Andriy Redko --- .../java/org/opensearch/index/query/TermsQueryBuilder.java | 4 ++-- server/src/main/java/org/opensearch/indices/TermsLookup.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java index 21ff90f2bf534..4b92d6a1f5460 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java @@ -251,7 +251,7 @@ public TermsQueryBuilder(StreamInput in) throws IOException { termsLookup = in.readOptionalWriteable(TermsLookup::new); values = (List) in.readGenericValue(); this.supplier = null; - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_17_0)) { valueType = in.readEnum(ValueType.class); } } @@ -264,7 +264,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeOptionalWriteable(termsLookup); out.writeGenericValue(values); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_17_0)) { out.writeEnum(valueType); } } diff --git a/server/src/main/java/org/opensearch/indices/TermsLookup.java b/server/src/main/java/org/opensearch/indices/TermsLookup.java index 6b83b713c05c2..7337ed31ce41e 100644 --- a/server/src/main/java/org/opensearch/indices/TermsLookup.java +++ b/server/src/main/java/org/opensearch/indices/TermsLookup.java @@ -87,7 +87,7 @@ public TermsLookup(StreamInput in) throws IOException { path = in.readString(); index = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_17_0)) { store = in.readBoolean(); } } @@ -101,7 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(path); out.writeString(index); out.writeOptionalString(routing); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_17_0)) { out.writeBoolean(store); } } From b6b04039ab9b05de4da50863503b68f6be74e424 Mon 
Sep 17 00:00:00 2001 From: Ganesh Krishna Ramadurai Date: Thu, 22 Aug 2024 13:57:42 -0700 Subject: [PATCH 03/17] Add support for index level slice count setting (#15336) Signed-off-by: Ganesh Ramadurai --- CHANGELOG.md | 1 + .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexSettings.java | 9 ++ .../search/DefaultSearchContext.java | 9 +- .../opensearch/search/SearchServiceTests.java | 101 ++++++++++++++++++ 5 files changed, 120 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6943e538fdcc..f0a54f2eab9f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774)) - Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153)) - Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895)) +- Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a4d60bc76127c..284eb43aa5509 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -238,6 +238,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for concurrent segment search IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT, IndexSettings.ALLOW_DERIVED_FIELDS, // Settings for star tree index diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index a833d66fab5d9..9cab68d646b6e 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -76,6 +76,7 @@ import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE; /** * This class encapsulates all index level settings and handles settings updates. 
@@ -691,6 +692,14 @@ public static IndexMergePolicy fromString(String text) { Property.Dynamic ); + public static final Setting<Integer> INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT = Setting.intSetting( + "index.search.concurrent.max_slice_count", + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting<Boolean> INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting( "index.optimize_doc_id_lookup.fuzzy_set.enabled", false, diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index abb968c2de245..1401c6b89d38a 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -991,7 +991,14 @@ public int getTargetMaxSliceCount() { if (shouldUseConcurrentSearch() == false) { throw new IllegalStateException("Target slice count should not be used when concurrent search is disabled"); } - return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING); + + return indexService.getIndexSettings() + .getSettings() + .getAsInt( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(), + clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING) + ); + } @Override diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 1caa2c99fc3b8..e8a0f70ee3563 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -54,6 +54,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; @@ -1413,6 +1414,106 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get(); } + /** + * Tests that the slice count is calculated correctly when concurrent search is enabled: + * pick the index level slice count setting if it is set, + * else pick the default cluster level slice count setting. + * @throws IOException + */ + public void testConcurrentSegmentSearchSliceCount() throws IOException { + + String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + ShardId shardId = new ShardId(indexService.index(), 0); + long nowInMillis = System.currentTimeMillis(); + String clusterAlias = randomBoolean() ?
null : randomAlphaOfLengthBetween(3, 10); + SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(randomBoolean()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + nowInMillis, + clusterAlias, + Strings.EMPTY_ARRAY + ); + // enable concurrent search + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true)) + .get(); + + Integer[][] scenarios = { + // cluster setting, index setting, expected slice count + // expected value null will pick up default value from settings + { null, null, clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING) }, + { 4, null, 4 }, + { null, 3, 3 }, + { 4, 3, 3 }, }; + + for (Integer[] sliceCounts : scenarios) { + Integer clusterSliceCount = sliceCounts[0]; + Integer indexSliceCount = sliceCounts[1]; + Integer targetSliceCount = sliceCounts[2]; + + if (clusterSliceCount != null) { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), clusterSliceCount) + ) + .get(); + } else { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().putNull(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey()) + ) + .get(); + } + if (indexSliceCount != null) { + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings( + Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(), indexSliceCount) + ) + .get(); + } else { + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().putNull(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey())) + .get(); + } + + try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(targetSliceCount.intValue(), searchContext.getTargetMaxSliceCount()); + } + } + // cleanup + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) + .putNull(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey()) + ) + .get(); + } + /** * Test that the Search Context for concurrent segment search enabled is set correctly at the time of construction. * The same is used throughout the context object lifetime even if cluster setting changes before the request completion. From d5a6c0bffe405e97021bf7405a92d2c9b050245c Mon Sep 17 00:00:00 2001 From: Sarat Vemulapalli Date: Thu, 22 Aug 2024 15:34:10 -0700 Subject: [PATCH 04/17] Adding allowlist setting for ingest-useragent and ingest-geoip processors (#15325) * Adding allowlist setting for user-agent, geo-ip and updated tests for ingest-common. 
Signed-off-by: Sarat Vemulapalli * Remove duplicate test in ingest-common Signed-off-by: Sarat Vemulapalli * Adding changelog Signed-off-by: Sarat Vemulapalli --------- Signed-off-by: Sarat Vemulapalli --- CHANGELOG.md | 1 + .../ingest/geoip/IngestGeoIpModulePlugin.java | 40 +++++- .../geoip/IngestGeoIpModulePluginTests.java | 95 +++++++++++++++ .../IngestUserAgentModulePlugin.java | 39 +++++- .../IngestUserAgentModulePluginTests.java | 114 ++++++++++++++++++ 5 files changed, 286 insertions(+), 3 deletions(-) create mode 100644 modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index f0a54f2eab9f6..a1f3d9287e4a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) - Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774)) - Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153)) +- Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325)) - Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895)) - Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336)) diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 524d1719c37c0..742b923f05199 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -44,6 +44,7 @@ import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; @@ -62,10 +63,18 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable { + static final Setting<List<String>> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.geoip.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); public static final Setting<Long> CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" }; @@ -74,7 +83,7 @@ public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Clo @Override public List<Setting<?>> getSettings() { - return Arrays.asList(CACHE_SIZE); + return Arrays.asList(CACHE_SIZE, PROCESSORS_ALLOWLIST_SETTING); } @Override
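The allowlist semantics introduced here distinguish an absent setting from an empty one, which the tests further below also exercise: absent keeps every bundled processor, an empty list registers none, and an unknown name is rejected during plugin initialization. A minimal sketch against the Settings API — only the setting key is from this patch, the values are illustrative:

```java
import java.util.List;

import org.opensearch.common.settings.Settings;

public class AllowlistExamples {
    public static void main(String[] args) {
        // Key absent: PROCESSORS_ALLOWLIST_SETTING.exists(...) is false and the
        // processor map is returned unfiltered, so "geoip" stays registered.
        Settings unset = Settings.EMPTY;

        // Key present with one entry: only the "geoip" processor survives filtering.
        Settings onlyGeoip = Settings.builder()
            .putList("ingest.geoip.processors.allowed", "geoip")
            .build();

        // Key present but empty: the setting exists, so every processor is
        // filtered out and none are registered on the node.
        Settings none = Settings.builder()
            .putList("ingest.geoip.processors.allowed", List.of())
            .build();

        // An entry matching no known processor (e.g. "set") fails with:
        // Processor(s) [set] were defined in [ingest.geoip.processors.allowed] but do not exist
    }
}
```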
@@ -90,7 +99,10 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))); + return filterForAllowlistSetting( + parameters.env.settings(), + Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))) + ); } /* @@ -175,6 +187,30 @@ public void close() throws IOException { } } + private Map<String, Processor.Factory> filterForAllowlistSetting(Settings settings, Map<String, Processor.Factory> map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set<String> allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set<String> unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + /** * The in-memory cache for the geoip data. There should only be 1 instance of this class. * This cache differs from the maxmind's {@link NodeCache} such that this cache stores the deserialized Json objects to avoid the diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index f6d42d1d65670..9446ec1228532 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -35,8 +35,20 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.StreamsUtils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; import static org.mockito.Mockito.mock; @@ -77,4 +89,87 @@ public void testInvalidInit() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1)); assertEquals("geoip max cache size must be 0 or greater", ex.getMessage()); } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("geoip")); + } + + public void testInvalidAllowList() throws IOException { + List<String> invalidAllowList = List.of("set"); + Settings.Builder settingsBuilder = Settings.builder() + .putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), invalidAllowList); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settingsBuilder.build())) + ); + assertEquals( + "Processor(s) " +
invalidAllowList + + " were defined in [" + + IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.remove(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + final Set<String> expected = Set.of("geoip"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List<String> allowList) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + createDb(settingsBuilder); + final Settings settings = settingsBuilder.putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList).build(); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private void createDb(Settings.Builder settingsBuilder) throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-geoip"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder.put("ingest.geoip.database_path", configDir).put("path.home", configDir); + try { + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), + configDir.resolve("GeoLite2-City.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), + configDir.resolve("GeoLite2-Country.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), + configDir.resolve("GeoLite2-ASN.mmdb") + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } } diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index cd96fcf2347ef..bac90d20b44e1 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.useragent; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; @@ -47,10 +48,19 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { + static final Setting<List<String>> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.useragent.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); private final Setting<Long> CACHE_SIZE_SETTING = Setting.longSetting(
"ingest.user_agent.cache_size", 1000, @@ -77,7 +87,34 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)); + return filterForAllowlistSetting( + parameters.env.settings(), + Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)) + ); + } + + private Map filterForAllowlistSetting(Settings settings, Map map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); } static Map createUserAgentParsers(Path userAgentConfigDirectory, UserAgentCache cache) throws IOException { diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java new file mode 100644 index 0000000000000..31fdafff1188a --- /dev/null +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.useragent; + +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +public class IngestUserAgentModulePluginTests extends OpenSearchTestCase { + private Settings.Builder settingsBuilder; + + @Before + public void setup() throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-user-agent"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder = Settings.builder().put("ingest-user-agent", configDir).put("path.home", configDir); + + // Copy file, leaving out the device parsers at the end + String regexWithoutDevicesFilename = "regexes_without_devices.yml"; + try ( + BufferedReader reader = new BufferedReader( + new InputStreamReader(UserAgentProcessor.class.getResourceAsStream("/regexes.yml"), StandardCharsets.UTF_8) + ); + BufferedWriter writer = Files.newBufferedWriter(userAgentConfigDir.resolve(regexWithoutDevicesFilename)); + ) { + String line; + while ((line = reader.readLine()) != null) { + if (line.startsWith("device_parsers:")) { + break; + } + + writer.write(line); + writer.newLine(); + } + } + } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("user_agent")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + final Settings settings = settingsBuilder.putList( + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), + invalidAllowList + ).build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settings)) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + settingsBuilder.remove(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + final Set expected = Set.of("user_agent"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + final Settings settings = settingsBuilder.putList(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList) + .build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } +} From ed65482b8ae9b3285271e56cf0dca04786fb2d4a Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Thu, 22 Aug 2024 16:53:13 -0700 
Subject: [PATCH 05/17] Add Delete QueryGroup API Logic (#14735) * Add Delete QueryGroup API Logic Signed-off-by: Ruirui Zhang * modify changelog Signed-off-by: Ruirui Zhang * include comments from create pr Signed-off-by: Ruirui Zhang * remove delete all Signed-off-by: Ruirui Zhang * rebase and address comments Signed-off-by: Ruirui Zhang * rebase Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang * add UT coverage Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../plugin/wlm/WorkloadManagementPlugin.java | 15 ++- .../wlm/WorkloadManagementPluginModule.java | 31 +++++++ .../wlm/action/DeleteQueryGroupAction.java | 38 ++++++++ .../wlm/action/DeleteQueryGroupRequest.java | 65 +++++++++++++ .../TransportCreateQueryGroupAction.java | 8 +- .../TransportDeleteQueryGroupAction.java | 91 +++++++++++++++++++ .../wlm/rest/RestDeleteQueryGroupAction.java | 57 ++++++++++++ .../service/QueryGroupPersistenceService.java | 52 +++++++++++ .../action/CreateQueryGroupResponseTests.java | 4 +- .../action/DeleteQueryGroupRequestTests.java | 42 +++++++++ .../TransportDeleteQueryGroupActionTests.java | 63 +++++++++++++ .../rest/RestDeleteQueryGroupActionTests.java | 85 +++++++++++++++++ .../QueryGroupPersistenceServiceTests.java | 58 ++++++++++++ .../api/delete_query_group_context.json | 22 +++++ .../rest-api-spec/test/wlm/10_query_group.yml | 6 ++ .../opensearch/cluster/metadata/Metadata.java | 13 ++- 17 files changed, 636 insertions(+), 15 deletions(-) create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java create mode 100644 plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json diff --git a/CHANGELOG.md b/CHANGELOG.md index a1f3d9287e4a8..cd02af4f625b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Workload Management] Add Delete QueryGroup API Logic ([#14735](https://github.com/opensearch-project/OpenSearch/pull/14735)) - [Streaming Indexing] Enhance RestClient 
with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680)) diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java index 6b4496af76dc3..64f510fa1db67 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.inject.Module; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -18,10 +19,13 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.core.action.ActionResponse; import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.GetQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction; +import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.plugins.ActionPlugin; @@ -29,6 +33,7 @@ import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; +import java.util.Collection; import java.util.List; import java.util.function.Supplier; @@ -46,7 +51,8 @@ public WorkloadManagementPlugin() {} public List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { return List.of( new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class) + new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class), + new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class) ); } @@ -60,11 +66,16 @@ public List getRestHandlers( IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster ) { - return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction()); + return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction(), new RestDeleteQueryGroupAction()); } @Override public List<Setting<?>> getSettings() { return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); } + + @Override + public Collection<Module> createGuiceModules() { + return List.of(new WorkloadManagementPluginModule()); + } }
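The module registered above binds the persistence service in singleton scope; the comment in the module itself explains why. As a standalone illustration of what `in(Singleton.class)` buys (a toy service stands in for QueryGroupPersistenceService, and this assumes the bundled org.opensearch.common.inject fork exposes `Guice.createInjector` like upstream Guice — the node wires plugin modules differently in practice):

```java
import org.opensearch.common.inject.AbstractModule;
import org.opensearch.common.inject.Guice;
import org.opensearch.common.inject.Injector;
import org.opensearch.common.inject.Singleton;

public class SingletonBindingDemo {
    static class ToyService {} // stand-in for QueryGroupPersistenceService

    static class ToyModule extends AbstractModule {
        @Override
        protected void configure() {
            bind(ToyService.class).in(Singleton.class); // same pattern as the module below
        }
    }

    public static void main(String[] args) {
        Injector injector = Guice.createInjector(new ToyModule());
        // Both injection points receive the same instance, so constructor-time
        // side effects (like registering throttling keys) happen exactly once.
        System.out.println(injector.getInstance(ToyService.class) == injector.getInstance(ToyService.class)); // true
    }
}
```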
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java new file mode 100644 index 0000000000000..b7c7805639eb2 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.common.inject.AbstractModule; +import org.opensearch.common.inject.Singleton; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; + +/** + * Guice Module to manage WorkloadManagement related objects + */ +public class WorkloadManagementPluginModule extends AbstractModule { + + /** + * Constructor for WorkloadManagementPluginModule + */ + public WorkloadManagementPluginModule() {} + + @Override + protected void configure() { + // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used, + // preventing multiple throttling key registrations in the constructor. + bind(QueryGroupPersistenceService.class).in(Singleton.class); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java new file mode 100644 index 0000000000000..c78952a2f89ad --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.master.AcknowledgedResponse; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupAction extends ActionType<AcknowledgedResponse> { + + /** + * An instance of DeleteQueryGroupAction + */ + public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction(); + + /** + * Name for DeleteQueryGroupAction + */ + public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete"; + + /** + * Default constructor + */ + private DeleteQueryGroupAction() { + super(NAME, AcknowledgedResponse::new); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java new file mode 100644 index 0000000000000..e514943c2c7e9 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupRequest extends AcknowledgedRequest<DeleteQueryGroupRequest> { + private final String name; + + /** + * Default constructor for DeleteQueryGroupRequest + * @param name - name for the QueryGroup to delete + */ + public DeleteQueryGroupRequest(String name) { + this.name = name; + } + + /** + * Constructor for DeleteQueryGroupRequest + * @param in - A {@link StreamInput} object + */ + public DeleteQueryGroupRequest(StreamInput in) throws IOException { + super(in); + name = in.readOptionalString(); + } + + @Override + public ActionRequestValidationException validate() { + if (name == null) { + ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException(); + actionRequestValidationException.addValidationError("QueryGroup name is missing"); + return actionRequestValidationException; + } + return null; + } + + /** + * Name getter + */ + public String getName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(name); + } +}
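A quick sketch of the validation contract this request enforces (it mirrors the unit test added later in this patch; the group name is illustrative):

```java
import org.opensearch.action.ActionRequestValidationException;
import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;

public class DeleteRequestValidationDemo {
    public static void main(String[] args) {
        DeleteQueryGroupRequest ok = new DeleteQueryGroupRequest("analytics");
        System.out.println(ok.validate()); // null: a present name passes validation

        // The (String) cast disambiguates from the StreamInput constructor.
        DeleteQueryGroupRequest missing = new DeleteQueryGroupRequest((String) null);
        ActionRequestValidationException e = missing.validate();
        System.out.println(e.getMessage()); // contains "QueryGroup name is missing"
    }
}
```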
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java index 01aa8cfb5e610..190ff17261bb4 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java @@ -14,7 +14,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.tasks.Task; -import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; /** @@ -24,7 +23,6 @@ */ public class TransportCreateQueryGroupAction extends HandledTransportAction<CreateQueryGroupRequest, CreateQueryGroupResponse> { - private final ThreadPool threadPool; private final QueryGroupPersistenceService queryGroupPersistenceService; /** @@ -33,7 +31,6 @@ public class TransportCreateQueryGroupAction extends HandledTransportAction listener) { - threadPool.executor(ThreadPool.Names.SAME) .execute(() -> queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener)); + queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..e4d3908d4a208 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction<DeleteQueryGroupRequest, AcknowledgedResponse> { + + private final QueryGroupPersistenceService queryGroupPersistenceService; + + /** + * Constructor for TransportDeleteQueryGroupAction + * + * @param clusterService - a {@link ClusterService} object + * @param transportService - a {@link TransportService} object + * @param actionFilters - a {@link ActionFilters} object + * @param threadPool - a {@link ThreadPool} object + * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object + * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + */ + @Inject + public TransportDeleteQueryGroupAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + ThreadPool threadPool, + IndexNameExpressionResolver indexNameExpressionResolver, + QueryGroupPersistenceService queryGroupPersistenceService + ) { + super( + DeleteQueryGroupAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteQueryGroupRequest::new, + indexNameExpressionResolver + ); + this.queryGroupPersistenceService = queryGroupPersistenceService; + } + + @Override + protected void clusterManagerOperation( + DeleteQueryGroupRequest request, + ClusterState state, + ActionListener<AcknowledgedResponse> listener + ) throws Exception { + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..8ad621cf8a1e4 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action to delete a QueryGroup + * + * @opensearch.experimental + */ +public class RestDeleteQueryGroupAction extends BaseRestHandler { + + /** + * Constructor for RestDeleteQueryGroupAction + */ + public RestDeleteQueryGroupAction() {} + + @Override + public String getName() { + return "delete_query_group"; + } + + /** + * The list of {@link Route}s that this RestHandler is responsible for handling. + */ + @Override + public List<Route> routes() { + return List.of(new Route(DELETE, "_wlm/query_group/{name}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name")); + deleteQueryGroupRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout()) + ); + deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout())); + return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel)); + } +}
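For completeness, a hedged sketch of driving the same flow programmatically rather than over REST (equivalent to `DELETE _wlm/query_group/{name}`; the group name and timeout values are illustrative):

```java
import org.opensearch.client.Client;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.core.action.ActionListener;
import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction;
import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest;

public final class DeleteQueryGroupClientDemo {
    static void deleteQueryGroup(Client client, String name) {
        DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(name);
        request.clusterManagerNodeTimeout(TimeValue.timeValueSeconds(30)); // illustrative timeouts
        request.timeout(TimeValue.timeValueSeconds(60));
        client.execute(DeleteQueryGroupAction.INSTANCE, request, ActionListener.wrap(
            response -> System.out.println("acknowledged=" + response.isAcknowledged()),
            e -> System.err.println("delete failed: " + e) // e.g. ResourceNotFoundException for an unknown name
        ));
    }
}
```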
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java index fe7080da78bbe..ba5161a2c855e 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java @@ -10,10 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -24,6 +28,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import java.util.Collection; @@ -38,6 +43,7 @@ public class QueryGroupPersistenceService { static final String SOURCE = "query-group-persistence-service"; private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group"; + private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group"; private static final Logger logger = LogManager.getLogger(QueryGroupPersistenceService.class); /** * max QueryGroup count setting name */ @@ -65,6 +71,7 @@ public class QueryGroupPersistenceService { private final ClusterService clusterService; private volatile int maxQueryGroupCount; final ThrottlingKey createQueryGroupThrottlingKey; + final ThrottlingKey deleteQueryGroupThrottlingKey; /** * Constructor for QueryGroupPersistenceService @@ -81,6 +88,7 @@ public QueryGroupPersistenceService( ) { this.clusterService = clusterService; this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); + this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); } @@ -212,6 +220,50 @@ public static Collection getFromClusterStateMetadata(String name, Cl .collect(Collectors.toList()); } + /** + * Modify cluster state to delete the QueryGroup + * @param deleteQueryGroupRequest - request to delete a QueryGroup + * @param listener - ActionListener for AcknowledgedResponse + */ + public void deleteInClusterStateMetadata( + DeleteQueryGroupRequest deleteQueryGroupRequest, + ActionListener<AcknowledgedResponse> listener + ) { + clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState); + } + + @Override + public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() { + return deleteQueryGroupThrottlingKey; + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + /** + * Modify cluster state to delete the QueryGroup, and return the new cluster state + * @param name - the name for QueryGroup to be deleted + * @param currentClusterState - current cluster state + */ + ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) { + final Metadata metadata = currentClusterState.metadata(); + final QueryGroup queryGroupToRemove = metadata.queryGroups() + .values() + .stream() + .filter(queryGroup -> queryGroup.getName().equals(name)) + .findAny() + .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name)); + + return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build(); + } + /** * maxQueryGroupCount getter */ diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java index 038f015713c5b..ecb9a6b2dc0d2 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -27,7 +27,7 @@ public class CreateQueryGroupResponseTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of CreateQueryGroupResponse.
*/ public void testSerialization() throws IOException { CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); @@ -46,7 +46,7 @@ public void testSerialization() throws IOException { } /** - * Test case to verify the toXContent method of CreateQueryGroupResponse. + * Test case to validate the toXContent method of CreateQueryGroupResponse. */ public void testToXContentCreateQueryGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java new file mode 100644 index 0000000000000..bc2e4f0faca4c --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest. + */ + public void testSerialization() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); + assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to validate a DeleteQueryGroupRequest. + */ + public void testSerializationWithNull() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertFalse(actionRequestValidationException.getMessage().isEmpty()); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..253d65f8da80f --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { + + ClusterService clusterService = mock(ClusterService.class); + TransportService transportService = mock(TransportService.class); + ActionFilters actionFilters = mock(ActionFilters.class); + ThreadPool threadPool = mock(ThreadPool.class); + IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + + TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + clusterService, + transportService, + actionFilters, + threadPool, + indexNameExpressionResolver, + queryGroupPersistenceService + ); + + /** + * Test case to validate the construction for TransportDeleteQueryGroupAction + */ + public void testConstruction() { + assertNotNull(action); + assertEquals(ThreadPool.Names.SAME, action.executor()); + } + + /** + * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + */ + public void testClusterManagerOperation() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + ClusterState clusterState = mock(ClusterState.class); + action.clusterManagerOperation(request, clusterState, listener); + verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..72191e076bb87 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.List; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase { + /** + * Test case to validate the construction for RestDeleteQueryGroupAction + */ + public void testConstruction() { + RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction(); + assertNotNull(action); + assertEquals("delete_query_group", action.getName()); + List routes = action.routes(); + assertEquals(1, routes.size()); + RestHandler.Route route = routes.get(0); + assertEquals(DELETE, route.getMethod()); + assertEquals("_wlm/query_group/{name}", route.getPath()); + } + + /** + * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction + */ + @SuppressWarnings("unchecked") + public void testPrepareRequest() throws Exception { + RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction(); + NodeClient nodeClient = mock(NodeClient.class); + RestRequest realRequest = new FakeRestRequest(); + realRequest.params().put("name", NAME_ONE); + ; + RestRequest spyRequest = spy(realRequest); + + doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class)); + doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class)); + + CheckedConsumer consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient); + assertNotNull(consumer); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class); + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class); + doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); + + consumer.accept(mock(RestChannel.class)); + DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue(); + assertEquals(NAME_ONE, capturedRequest.getName()); + assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout()); + assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout()); + verify(nodeClient).execute( + eq(DeleteQueryGroupAction.INSTANCE), + any(DeleteQueryGroupRequest.class), + any(RestToXContentListener.class) + ); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java index 2aa3b9e168852..a516ffdde839e 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -8,6 +8,9 @@ package org.opensearch.plugin.wlm.service; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -20,6 +23,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.QueryGroupTestUtils; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -39,6 +43,7 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; @@ -48,12 +53,14 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -298,4 +305,55 @@ public void testMaxQueryGroupCount() { queryGroupPersistenceService.setMaxQueryGroupCount(50); assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); } + + /** + * Tests delete a single QueryGroup + */ + public void testDeleteSingleQueryGroup() { + ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState()); + Map afterDeletionGroups = newClusterState.getMetadata().queryGroups(); + assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); + assertEquals(1, afterDeletionGroups.size()); + List oldQueryGroups = new ArrayList<>(); + oldQueryGroups.add(queryGroupOne); + assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups); + } + + /** + * Tests delete a QueryGroup with invalid name + */ + public void testDeleteNonExistedQueryGroup() { + assertThrows( + ResourceNotFoundException.class, + 
() -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState()) + ); + } + + /** + * Tests DeleteInClusterStateMetadata function + */ + @SuppressWarnings("unchecked") + public void testDeleteInClusterStateMetadata() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE); + ClusterService clusterService = mock(ClusterService.class); + + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + AckedClusterStateUpdateTask task = invocation.getArgument(1); + ClusterState initialState = clusterState(); + ClusterState newState = task.execute(initialState); + assertNotNull(newState); + assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey()); + task.onAllNodesAcked(null); + verify(listener).onResponse(argThat(response -> response.isAcknowledged())); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); + } } diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json new file mode 100644 index 0000000000000..16930427fc2fe --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json @@ -0,0 +1,22 @@ +{ + "delete_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group/{name}", + "methods": [ + "DELETE" + ], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml index a22dfa2f4477e..a00314986a5cf 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml @@ -106,3 +106,9 @@ - match: { query_groups.0.resiliency_mode: "monitor" } - match: { query_groups.0.resource_limits.cpu: 0.35 } - match: { query_groups.0.resource_limits.memory: 0.25 } + + - do: + delete_query_group_context: + name: "analytics2" + + - match: { acknowledged: true } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 4da6c68b40733..6163fd624c838 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -1397,7 +1397,14 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } - public Map getQueryGroups() { + public Builder remove(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, "queryGroup should not be null"); + Map existing = new HashMap<>(getQueryGroups()); + existing.remove(queryGroup.get_id()); + return queryGroups(existing); + } + + private Map 
getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) .map(QueryGroupMetadata::queryGroups) @@ -1830,9 +1837,7 @@ static void validateDataStreams(SortedMap indicesLooku if (dsMetadata != null) { for (DataStream ds : dsMetadata.dataStreams().values()) { String prefix = DataStream.BACKING_INDEX_PREFIX + ds.getName() + "-"; - Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") // '.' is the - // char after - // '-' + Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") .keySet() .stream() .filter(s -> NUMBER_PATTERN.matcher(s.substring(prefix.length())).matches()) From 9e5604b924fc3ad9f8fb61b8782d67f2fc720eb2 Mon Sep 17 00:00:00 2001 From: Sarthak Aggarwal Date: Fri, 23 Aug 2024 17:05:21 +0530 Subject: [PATCH 06/17] [Star Tree] Lucene Abstractions for Star Tree File Formats (#15278) --------- Signed-off-by: Sarthak Aggarwal --- .../Lucene90DocValuesConsumerWrapper.java | 46 +++++++ .../Lucene90DocValuesProducerWrapper.java | 46 +++++++ .../SortedNumericDocValuesWriterWrapper.java | 53 ++++++++ .../composite/CompositeCodecFactory.java | 5 + .../LuceneDocValuesConsumerFactory.java | 50 +++++++ .../LuceneDocValuesProducerFactory.java | 60 +++++++++ .../{ => composite99}/Composite99Codec.java | 2 +- .../Composite99DocValuesFormat.java | 2 +- .../Composite99DocValuesReader.java | 5 +- .../Composite99DocValuesWriter.java | 5 +- .../composite/composite99/package-info.java | 12 ++ .../services/org.apache.lucene.codecs.Codec | 2 +- .../opensearch/index/codec/CodecTests.java | 2 +- .../LuceneDocValuesConsumerFactoryTests.java | 85 ++++++++++++ .../LuceneDocValuesProducerFactoryTests.java | 124 ++++++++++++++++++ ...tedNumericDocValuesWriterWrapperTests.java | 94 +++++++++++++ .../StarTreeDocValuesFormatTests.java | 2 +- 17 files changed, 588 insertions(+), 7 deletions(-) create mode 100644 server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java create mode 100644 server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99Codec.java (97%) rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99DocValuesFormat.java (97%) rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99DocValuesReader.java (91%) rename server/src/main/java/org/opensearch/index/codec/composite/{ => composite99}/Composite99DocValuesWriter.java (97%) create mode 100644 server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java 
b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java
new file mode 100644
index 0000000000000..67ee45f4c9306
--- /dev/null
+++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.apache.lucene.codecs.lucene90;
+
+import org.apache.lucene.codecs.DocValuesConsumer;
+import org.apache.lucene.index.SegmentWriteState;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * This class is an abstraction of the {@link DocValuesConsumer} for the Star Tree index structure.
+ * It is responsible for consuming various types of document values (numeric, binary, sorted, sorted numeric,
+ * and sorted set) for fields in the Star Tree index.
+ *
+ * @opensearch.experimental
+ */
+public class Lucene90DocValuesConsumerWrapper implements Closeable {
+
+    private final Lucene90DocValuesConsumer lucene90DocValuesConsumer;
+
+    public Lucene90DocValuesConsumerWrapper(
+        SegmentWriteState state,
+        String dataCodec,
+        String dataExtension,
+        String metaCodec,
+        String metaExtension
+    ) throws IOException {
+        lucene90DocValuesConsumer = new Lucene90DocValuesConsumer(state, dataCodec, dataExtension, metaCodec, metaExtension);
+    }
+
+    public Lucene90DocValuesConsumer getLucene90DocValuesConsumer() {
+        return lucene90DocValuesConsumer;
+    }
+
+    @Override
+    public void close() throws IOException {
+        lucene90DocValuesConsumer.close();
+    }
+}
diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java
new file mode 100644
index 0000000000000..a213852c59094
--- /dev/null
+++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.apache.lucene.codecs.lucene90;
+
+import org.apache.lucene.codecs.DocValuesProducer;
+import org.apache.lucene.index.SegmentReadState;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * This class is a custom abstraction of the {@link DocValuesProducer} for the Star Tree index structure.
+ * It is responsible for providing access to various types of document values (numeric, binary, sorted, sorted numeric,
+ * and sorted set) for fields in the Star Tree index.
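+ * <p>
+ * Note that the wrapper is declared in the {@code org.apache.lucene.codecs.lucene90} package,
+ * presumably so it can construct the package-private {@code Lucene90DocValuesProducer}.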
+ * + * @opensearch.experimental + */ +public class Lucene90DocValuesProducerWrapper implements Closeable { + + private final Lucene90DocValuesProducer lucene90DocValuesProducer; + + public Lucene90DocValuesProducerWrapper( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesProducer = new Lucene90DocValuesProducer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public DocValuesProducer getLucene90DocValuesProducer() { + return lucene90DocValuesProducer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesProducer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..f7759fcced284 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted numeric doc values. + *
<p>
+ * This class provides a convenient way to add sorted numeric doc values to a field + * and retrieve the corresponding {@link SortedNumericDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedNumericDocValuesWriterWrapper { + + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + + /** + * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + */ + public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { + sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + } + + /** + * Adds a value to the sorted numeric doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, long value) { + sortedNumericDocValuesWriter.addValue(docID, value); + } + + /** + * Returns the {@link SortedNumericDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedNumericDocValues} instance + */ + public SortedNumericDocValues getDocValues() { + return sortedNumericDocValuesWriter.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java index 3acedc6a27d7f..99691d7061ac9 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java @@ -12,6 +12,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import java.util.HashMap; @@ -29,6 +30,10 @@ */ @ExperimentalApi public class CompositeCodecFactory { + + // we can use this to track the latest composite codec + public static final String COMPOSITE_CODEC = Composite99Codec.COMPOSITE_INDEX_CODEC_NAME; + public CompositeCodecFactory() {} public Map getCompositeIndexCodecs(MapperService mapperService, Logger logger) { diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java new file mode 100644 index 0000000000000..1ed672870337e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumerWrapper; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesConsumer} instances + * for the latest composite codec. + *
<p>
+ * The segments are written using the latest composite codec. The codec
+ * internally manages calling the appropriate consumer factory for its abstractions.
+ * <p>
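+ * For example, the Composite99 codec currently maps to the Lucene90 doc values consumer created here.
+ * <p>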
+ * This design ensures forward compatibility for writing operations + * + * @opensearch.experimental + */ +public class LuceneDocValuesConsumerFactory { + + public static DocValuesConsumer getDocValuesConsumerForCompositeCodec( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + try ( + Lucene90DocValuesConsumerWrapper lucene90DocValuesConsumerWrapper = new Lucene90DocValuesConsumerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ) + ) { + return lucene90DocValuesConsumerWrapper.getLucene90DocValuesConsumer(); + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java new file mode 100644 index 0000000000000..611a97ffeb834 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducerWrapper; +import org.apache.lucene.index.SegmentReadState; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesProducer} instances + * based on the specified composite codec. + *
<p>
+ * In producers, we want to ensure compatibility with older codec versions during segment reads.
+ * This approach allows writing with only the latest codec while maintaining
+ * the ability to read data encoded with any codec version present in the segment.
+ * <p>
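+ * For example, a segment written with the Composite99 codec is read through the Lucene90 doc values
+ * producer, while any future composite codec would map to its own producer in the switch below.
+ * <p>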
+ * This design ensures backward compatibility for reads across different codec versions. + * + * @opensearch.experimental + */ +public class LuceneDocValuesProducerFactory { + + public static DocValuesProducer getDocValuesProducerForCompositeCodec( + String compositeCodec, + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + + switch (compositeCodec) { + case Composite99Codec.COMPOSITE_INDEX_CODEC_NAME: + try ( + Lucene90DocValuesProducerWrapper lucene90DocValuesProducerWrapper = new Lucene90DocValuesProducerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ) + ) { + return lucene90DocValuesProducerWrapper.getLucene90DocValuesProducer(); + } + default: + throw new IllegalStateException("Invalid composite codec " + "[" + compositeCodec + "]"); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java index de04944e67cd2..8422932e937c2 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java index 216ed4f68f333..e8c69b11b7c88 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java similarity index 91% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java index df5008a7f294e..e3bfe01cfa2d5 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.BinaryDocValues; @@ -17,6 +17,9 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java index 6ed1a8c42e380..da784e1232800 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; @@ -18,6 +18,9 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java new file mode 100644 index 0000000000000..3d6f130b9a7c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Responsible for handling all composite index codecs and operations associated with Composite99 codec + */ +package org.opensearch.index.codec.composite.composite99; diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index e030a813373c1..f51452c57f975 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1 +1 @@ -org.opensearch.index.codec.composite.Composite99Codec +org.opensearch.index.codec.composite.composite99.Composite99Codec diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 7146b7dc51753..bbf98b5c32918 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,7 +48,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java new file mode 100644 index 0000000000000..7fb8fe7f68f45 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +public class LuceneDocValuesConsumerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesConsumerForCompositeCodec() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumer", consumer.getClass().getName()); + assertEquals(CompositeCodecFactory.COMPOSITE_CODEC, Composite99Codec.COMPOSITE_INDEX_CODEC_NAME); + consumer.close(); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java new file mode 100644 index 0000000000000..55d637dfb9cae --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +import static org.mockito.Mockito.mock; + +public class LuceneDocValuesProducerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesProducerForCompositeCodec99() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + // open an consumer first in order for the producer to find the file + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + consumer.close(); + + SegmentReadState segmentReadState = new SegmentReadState( + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + newIOContext(random()) + ); + DocValuesProducer producer = LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + Composite99Codec.COMPOSITE_INDEX_CODEC_NAME, + segmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertNotNull(producer); + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducer", producer.getClass().getName()); + producer.close(); + } + + public void testGetDocValuesProducerForCompositeCodec_InvalidCodec() { + SegmentReadState mockSegmentReadState = mock(SegmentReadState.class); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + "invalid_codec", + mockSegmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + }); + + assertNotNull(exception); + assertTrue(exception.getMessage().contains("Invalid composite codec")); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java 
b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java new file mode 100644 index 0000000000000..54eead20ef354 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedNumericDocValuesWriterWrapper; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.util.Counter; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class SortedNumericDocValuesWriterWrapperTests extends OpenSearchTestCase { + + private SortedNumericDocValuesWriterWrapper wrapper; + private FieldInfo fieldInfo; + private Counter counter; + + @Override + public void setUp() throws Exception { + super.setUp(); + fieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + counter = Counter.newCounter(); + wrapper = new SortedNumericDocValuesWriterWrapper(fieldInfo, counter); + } + + public void testAddValue() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(1, 20); + wrapper.addValue(2, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(1, docValues.nextDoc()); + assertEquals(20, docValues.nextValue()); + assertEquals(2, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + } + + public void testGetDocValues() { + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + } + + public void testMultipleValues() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(0, 20); + wrapper.addValue(1, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(20, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + + assertEquals(1, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java index 6fa88215cad48..54a9cc035d7a9 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -29,7 +29,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; 
import org.opensearch.index.MapperTestUtils; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesModule; import org.junit.After; From dac646011f6c630f291fd10c1609dc3206480943 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Fri, 23 Aug 2024 19:48:16 +0530 Subject: [PATCH 07/17] [Star tree] Changes to handle derived metrics such as avg as part of star tree mapping (#15152) --------- Signed-off-by: Bharathwaj G --- .../index/mapper/StarTreeMapperIT.java | 20 +- .../compositeindex/datacube/MetricStat.java | 27 ++- .../startree/StarTreeIndexSettings.java | 13 +- .../startree/builder/BaseStarTreeBuilder.java | 3 + .../index/mapper/StarTreeMapper.java | 48 ++++- .../index/mapper/StarTreeMapperTests.java | 183 ++++++++++++++++-- 6 files changed, 247 insertions(+), 47 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 6f5b4bba481dd..c461f83657340 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -265,13 +265,9 @@ public void testValidCompositeIndex() { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( @@ -349,13 +345,9 @@ public void testUpdateIndexWhenMappingIsSame() { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java index df3b2229d2c5b..84eaaeb637962 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java @@ -10,6 +10,9 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Arrays; +import java.util.List; + /** * Supported metric types for composite index * @@ -18,21 +21,39 @@ @ExperimentalApi public enum MetricStat { VALUE_COUNT("value_count"), - AVG("avg"), SUM("sum"), MIN("min"), - MAX("max"); + MAX("max"), + 
AVG("avg", VALUE_COUNT, SUM); private final String typeName; + private final MetricStat[] baseMetrics; - MetricStat(String typeName) { + MetricStat(String typeName, MetricStat... baseMetrics) { this.typeName = typeName; + this.baseMetrics = baseMetrics; } public String getTypeName() { return typeName; } + /** + * Return the list of metrics that this metric is derived from + * For example, AVG is derived from COUNT and SUM + */ + public List getBaseMetrics() { + return Arrays.asList(baseMetrics); + } + + /** + * Return true if this metric is derived from other metrics + * For example, AVG is derived from COUNT and SUM + */ + public boolean isDerivedMetric() { + return baseMetrics != null && baseMetrics.length > 0; + } + public static MetricStat fromTypeName(String typeName) { for (MetricStat metric : MetricStat.values()) { if (metric.getTypeName().equalsIgnoreCase(typeName)) { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java index 6535f8ed11da3..ce389a99b3626 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java @@ -15,6 +15,7 @@ import java.util.Arrays; import java.util.List; +import java.util.function.Function; /** * Index settings for star tree fields. The settings are final as right now @@ -93,16 +94,10 @@ public class StarTreeIndexSettings { /** * Default metrics for metrics as part of star tree fields */ - public static final Setting> DEFAULT_METRICS_LIST = Setting.listSetting( + public static final Setting> DEFAULT_METRICS_LIST = Setting.listSetting( "index.composite_index.star_tree.field.default.metrics", - Arrays.asList( - MetricStat.AVG.toString(), - MetricStat.VALUE_COUNT.toString(), - MetricStat.SUM.toString(), - MetricStat.MAX.toString(), - MetricStat.MIN.toString() - ), - MetricStat::fromTypeName, + Arrays.asList(MetricStat.VALUE_COUNT.toString(), MetricStat.SUM.toString()), + Function.identity(), Setting.Property.IndexScope, Setting.Property.Final ); diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 90b2d0727d572..3fc8d24e6e0d2 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -118,6 +118,9 @@ public List generateMetricAggregatorInfos(MapperService ma List metricAggregatorInfos = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { for (MetricStat metricStat : metric.getMetrics()) { + if (metricStat.isDerivedMetric()) { + continue; + } IndexNumericFieldData.NumericType numericType; Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper(metric.getField()); if (fieldMapper instanceof NumberFieldMapper) { diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index d9539f9dc0c82..93764e93ae30d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ 
b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -28,6 +28,7 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -262,17 +263,50 @@ private Metric getMetric(String name, Map metric, Mapper.TypePar .collect(Collectors.toList()); metric.remove(STATS); if (metricStrings.isEmpty()) { - metricTypes = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); - } else { - Set metricSet = new LinkedHashSet<>(); - for (String metricString : metricStrings) { - metricSet.add(MetricStat.fromTypeName(metricString)); - } - metricTypes = new ArrayList<>(metricSet); + metricStrings = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); + } + // Add all required metrics initially + Set metricSet = new LinkedHashSet<>(); + for (String metricString : metricStrings) { + MetricStat metricStat = MetricStat.fromTypeName(metricString); + metricSet.add(metricStat); + addBaseMetrics(metricStat, metricSet); } + addEligibleDerivedMetrics(metricSet); + metricTypes = new ArrayList<>(metricSet); return new Metric(name, metricTypes); } + /** + * Add base metrics of derived metric to metric set + */ + private void addBaseMetrics(MetricStat metricStat, Set metricSet) { + if (metricStat.isDerivedMetric()) { + Queue metricQueue = new LinkedList<>(metricStat.getBaseMetrics()); + while (metricQueue.isEmpty() == false) { + MetricStat metric = metricQueue.poll(); + if (metric.isDerivedMetric() && !metricSet.contains(metric)) { + metricQueue.addAll(metric.getBaseMetrics()); + } + metricSet.add(metric); + } + } + } + + /** + * Add derived metrics if all associated base metrics are present + */ + private void addEligibleDerivedMetrics(Set metricStats) { + for (MetricStat metric : MetricStat.values()) { + if (metric.isDerivedMetric() && !metricStats.contains(metric)) { + List sourceMetrics = metric.getBaseMetrics(); + if (metricStats.containsAll(sourceMetrics)) { + metricStats.add(metric); + } + } + } + } + @Override protected List> getParameters() { return List.of(config); diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 3fa97825cdfc6..6b3b87da89915 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -52,7 +52,7 @@ public void teardown() { } public void testValidStarTree() throws IOException { - MapperService mapperService = createMapperService(getExpandedMapping("status", "size")); + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); for (CompositeMappedFieldType type : compositeFieldTypes) { StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; @@ -66,7 +66,65 @@ public void testValidStarTree() throws IOException { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList(MetricStat.SUM, MetricStat.AVG); + + // Assert COUNT and SUM gets added when AVG is defined + List expectedMetrics = Arrays.asList(MetricStat.AVG, MetricStat.VALUE_COUNT, MetricStat.SUM); + 
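+        // StarTreeMapper collects the stats into a LinkedHashSet, adding AVG first and then its base
+        // metrics, which is why the expected order here is AVG, VALUE_COUNT, SUM.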
assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals( + new HashSet<>(Arrays.asList("@timestamp", "status")), + starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() + ); + } + } + + public void testMetricsWithJustSum() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithJustSum("status", "size")); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.DAY_OF_MONTH, + Rounding.DateTimeUnit.MONTH_OF_YEAR + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert AVG gets added when both of its base metrics is already present + List expectedMetrics = List.of(MetricStat.SUM); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals( + new HashSet<>(Arrays.asList("@timestamp", "status")), + starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() + ); + } + } + + public void testMetricsWithCountAndSum() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithSumAndCount("status", "size")); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.DAY_OF_MONTH, + Rounding.DateTimeUnit.MONTH_OF_YEAR + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert AVG gets added when both of its base metrics is already present + List expectedMetrics = List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); @@ -92,13 +150,7 @@ public void testValidStarTreeDefaults() throws IOException { assertEquals(expectedTimeUnits, dateDim.getIntervals()); 
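+        // With no stats configured, the defaults contribute VALUE_COUNT and SUM, and AVG is re-added
+        // as an eligible derived metric because both of its base metrics are present.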
assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("status", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); @@ -109,7 +161,7 @@ public void testValidStarTreeDefaults() throws IOException { public void testInvalidDim() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getExpandedMapping("invalid", "size")) + () -> createMapperService(getExpandedMappingWithJustAvg("invalid", "size")) ); assertEquals("Failed to parse mapping [_doc]: unknown dimension field [invalid]", ex.getMessage()); } @@ -117,7 +169,7 @@ public void testInvalidDim() { public void testInvalidMetric() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getExpandedMapping("status", "invalid")) + () -> createMapperService(getExpandedMappingWithJustAvg("status", "invalid")) ); assertEquals("Failed to parse mapping [_doc]: unknown metric field [invalid]", ex.getMessage()); } @@ -232,6 +284,9 @@ public void testMetric() { assertEquals(MetricStat.MIN, MetricStat.fromTypeName("min")); assertEquals(MetricStat.SUM, MetricStat.fromTypeName("sum")); assertEquals(MetricStat.AVG, MetricStat.fromTypeName("avg")); + + assertEquals(List.of(MetricStat.VALUE_COUNT, MetricStat.SUM), MetricStat.AVG.getBaseMetrics()); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> MetricStat.fromTypeName("invalid")); assertEquals("Invalid metric stat: invalid", ex.getMessage()); } @@ -310,7 +365,7 @@ public void testStarTreeField() { } public void testValidations() throws IOException { - MapperService mapperService = createMapperService(getExpandedMapping("status", "size")); + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Settings settings = Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), true).build(); CompositeIndexSettings enabledCompositeIndexSettings = new CompositeIndexSettings( settings, @@ -370,7 +425,7 @@ public void testValidations() throws IOException { ); } - private XContentBuilder getExpandedMapping(String dim, String metric) throws IOException { + private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) throws IOException { return topMapping(b -> { b.startObject("composite"); b.startObject("startree"); @@ -399,7 +454,6 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE b.startObject(); b.field("name", metric); b.startArray("stats"); - b.value("sum"); b.value("avg"); b.endArray(); b.endObject(); @@ -421,6 +475,107 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE }); } + private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + 
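+                // Only the "sum" stat is declared below, so testMetricsWithJustSum can verify that AVG
+                // is not derived when VALUE_COUNT is absent.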
b.startArray("skip_star_node_creation_for_dimensions"); + { + b.value("@timestamp"); + b.value("status"); + } + b.endArray(); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("day"); + b.value("month"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", dim); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", metric); + b.startArray("stats"); + b.value("sum"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } + + private XContentBuilder getExpandedMappingWithSumAndCount(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + b.startArray("skip_star_node_creation_for_dimensions"); + { + b.value("@timestamp"); + b.value("status"); + } + b.endArray(); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("day"); + b.value("month"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", dim); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", metric); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } + private XContentBuilder getMinMapping() throws IOException { return getMinMapping(false, false, false, false); } From beda6164c09f39ceb4dd832e3debdbcb7f7c8f3e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Fri, 23 Aug 2024 10:42:05 -0700 Subject: [PATCH 08/17] [Bugfix] Fixes IRC NPE bug for timed-out cacheable queries (#15327) * Fix IRC timeout bug Signed-off-by: Peter Alfonsi * addressed Sagar's comments Signed-off-by: Peter Alfonsi * addressed Ankit's comments Signed-off-by: Peter Alfonsi * Add UT for test coverage Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * tweak imports in new UT Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi * rerun gradle Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- .../indices/IndicesRequestCacheIT.java | 63 +++++++++++++++++++ .../indices/IndicesRequestCache.java | 11 ++-- .../opensearch/indices/IndicesService.java | 4 +- .../indices/IndicesServiceTests.java | 35 +++++++++++ 4 files changed, 105 insertions(+), 8 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 09d5c208a8756..108ef14f0fcb4 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -56,7 +62,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -65,6 +74,7 @@ import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.ZoneId; @@ -768,6 +778,59 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } + public void testTimedOutQuery() throws Exception { + // A timed out query should be cached and then invalidated + Client client = client(); + String index = "index"; + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + forceMerge(client, index); + + QueryBuilder timeoutQueryBuilder = new TermQueryBuilder("k", "hello") { + @Override + protected Query doToQuery(QueryShardContext context) { + return new TermQuery(new Term("k", "hello")) { + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + // Create the weight before sleeping. Otherwise, TermStates.build() (in the call to super.createWeight()) will + // sometimes throw an exception on timeout, rather than timing out gracefully. 
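+ // Editorial note: with the search timeout set to TimeValue.ZERO below, any artificial delay here + // should be enough for the query phase to flag the response as timed out while still producing a + // cacheable entry; the 500ms sleep is presumably a comfortable margin rather than a precise threshold.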
+ Weight result = super.createWeight(searcher, scoreMode, boost); + try { + Thread.sleep(500); + } catch (InterruptedException ignored) {} + return result; + } + }; + } + }; + + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(timeoutQueryBuilder) + .setTimeout(TimeValue.ZERO) + .get(); + assertTrue(resp.isTimedOut()); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); + // The cache should be empty as the timed-out query was invalidated + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + } + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 93946fa11de13..71f8cf5a78ec5 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -310,12 +310,11 @@ BytesReference getOrCompute( * @param cacheKey the cache key to invalidate */ void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { - assert reader.getReaderCacheHelper() != null; - String readerCacheKeyId = null; - if (reader instanceof OpenSearchDirectoryReader) { - IndexReader.CacheHelper cacheHelper = ((OpenSearchDirectoryReader) reader).getDelegatingCacheHelper(); - readerCacheKeyId = ((OpenSearchDirectoryReader.DelegatingCacheHelper) cacheHelper).getDelegatingCacheKey().getId(); - } + assert reader.getReaderCacheHelper() instanceof OpenSearchDirectoryReader.DelegatingCacheHelper; + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + IndexShard indexShard = (IndexShard) cacheEntity.getCacheIdentity(); cache.invalidate(getICacheKey(new Key(indexShard.shardId(), cacheKey, readerCacheKeyId, System.identityHashCode(indexShard)))); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 902ca95643625..a78328e24c588 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -68,6 +68,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -1754,8 +1755,7 @@ public boolean canCache(ShardSearchRequest request, SearchContext context) { if (context.getQueryShardContext().isCacheable() == false) { return false; } - return true; - + return context.searcher().getDirectoryReader().getReaderCacheHelper() instanceof DelegatingCacheHelper; } /** diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 6757dbc184961..b5350a39e8599 100644 --- 
a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -31,12 +31,15 @@ package org.opensearch.indices; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.IndexShardStats; +import org.opensearch.action.search.SearchType; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexGraveyard; @@ -44,6 +47,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -76,8 +80,11 @@ import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.test.TestSearchContext; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -627,4 +634,32 @@ public void testClusterRemoteTranslogBufferIntervalDefault() { indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval() ); } + + public void testDirectoryReaderWithoutDelegatingCacheHelperNotCacheable() throws IOException { + IndicesService indicesService = getIndicesService(); + final IndexService indexService = createIndex("test"); + ShardSearchRequest request = mock(ShardSearchRequest.class); + when(request.requestCache()).thenReturn(true); + + TestSearchContext context = new TestSearchContext(indexService.getBigArrays(), indexService) { + @Override + public SearchType searchType() { + return SearchType.QUERY_THEN_FETCH; + } + }; + + ContextIndexSearcher searcher = mock(ContextIndexSearcher.class); + context.setSearcher(searcher); + DirectoryReader reader = mock(DirectoryReader.class); + when(searcher.getDirectoryReader()).thenReturn(reader); + when(searcher.getIndexReader()).thenReturn(reader); + IndexReader.CacheHelper notDelegatingCacheHelper = mock(IndexReader.CacheHelper.class); + DelegatingCacheHelper delegatingCacheHelper = mock(DelegatingCacheHelper.class); + + for (boolean useDelegatingCacheHelper : new boolean[] { true, false }) { + IndexReader.CacheHelper cacheHelper = useDelegatingCacheHelper ? 
delegatingCacheHelper : notDelegatingCacheHelper; + when(reader.getReaderCacheHelper()).thenReturn(cacheHelper); + assertEquals(useDelegatingCacheHelper, indicesService.canCache(request, context)); + } + } } From 0eb63bdab7a602a85d03601970e6a699a015211d Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 23 Aug 2024 14:09:37 -0400 Subject: [PATCH 09/17] Bump opentelemetry from 1.40.0 to 1.41.0, opentelemetry-semconv from 1.26.0-alpha to 1.27.0-alpha (#15361) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 ++ buildSrc/version.properties | 4 ++-- plugins/telemetry-otel/build.gradle | 4 +++- .../telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 | 1 + .../opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 | 1 - .../opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 | 1 + .../opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 | 1 - .../opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 | 1 + .../telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 | 1 - .../licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 | 1 + 31 files changed, 21 insertions(+), 17 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 delete mode 100644 
plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index cd02af4f625b9..021a459352ba7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-core-http-netty` from 1.15.1 to 1.15.3 ([#15300](https://github.com/opensearch-project/OpenSearch/pull/15300)) - Bump `com.gradle.develocity` from 3.17.6 to 3.18 ([#15297](https://github.com/opensearch-project/OpenSearch/pull/15297)) - Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) +- Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9d7bbf6f8f769..ccec8e2891a65 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.40.0 -opentelemetrysemconv = 1.26.0-alpha +opentelemetry = 1.41.0 +opentelemetrysemconv = 1.27.0-alpha diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 66d172e3dc7f3..872d928aa093f 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -86,7 +86,9 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 
'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 deleted file mode 100644 index 04ec81edf969c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ead8fb235fa12 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 @@ -0,0 +1 @@ +ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 deleted file mode 100644 index bcd7c886b5f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..b601a4fb5246f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 deleted file mode 100644 index 9716ec518c886..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..74b7cb25cdfe5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 @@ -0,0 +1 @@ +3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 deleted file mode 100644 index c0e79b05aa675..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..d8d8f75850cb6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 deleted file mode 100644 index 1df0ad183c475..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..3e1212943f894 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 @@ -0,0 +1 @@ +8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 deleted file mode 100644 index ebeb639a8459c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..21a29cc8445e5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 deleted file mode 100644 index b630c808d4763..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ae522ac698aa8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 deleted file mode 100644 index eda90dc825e6f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..a741d0a167d60 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 deleted file mode 100644 index cdd7dc6551b33..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..972e7de1c74be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 @@ -0,0 +1 @@ +c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 deleted file mode 100644 index 668291498bbae..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..c56ca0b9e8169 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 deleted file mode 100644 index 74f0786e21954..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..39db6cb73727f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 @@ -0,0 +1 @@ +f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 deleted file mode 100644 index 23ef1bf6e6b2c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..6dcd496e033d3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 @@ -0,0 +1 @@ +9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 deleted file mode 100644 index aea753f0df18b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..161e400f87077 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 7124dcb31da3f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e986b4b53388e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 @@ -0,0 +1 @@ +906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file From 2301adf9a0e2921d2fa359c51ee5c87309ca71ad Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Fri, 23 Aug 2024 17:13:17 -0700 Subject: [PATCH 10/17] Add SplitResponseProcessor to allowlist (#15393) Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + .../pipeline/common/SearchPipelineCommonModulePlugin.java | 2 ++ .../pipeline/common/SearchPipelineCommonModulePluginTests.java | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 021a459352ba7..387aaf94bbe5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) +- Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) ### Security diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 2a2de9debb9d9..488b9e632aa2a 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -97,6 +97,8 @@ public Map> getResponseProces new TruncateHitsResponseProcessor.Factory(), CollapseResponseProcessor.TYPE, new CollapseResponseProcessor.Factory(), + SplitResponseProcessor.TYPE, + new SplitResponseProcessor.Factory(), SortResponseProcessor.TYPE, new SortResponseProcessor.Factory() ) diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java index 404842742629c..e10f06da29ba0 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java @@ -47,6 +47,7 @@ public void testResponseProcessorAllowlist() throws IOException { 
List.of("rename_field", "truncate_hits", "collapse"), SearchPipelineCommonModulePlugin::getResponseProcessors ); + runAllowlistTest(key, List.of("split", "sort"), SearchPipelineCommonModulePlugin::getResponseProcessors); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -82,7 +83,7 @@ public void testAllowlistNotSpecified() throws IOException { try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet()); assertEquals( - Set.of("rename_field", "truncate_hits", "collapse", "sort"), + Set.of("rename_field", "truncate_hits", "collapse", "split", "sort"), plugin.getResponseProcessors(createParameters(settings)).keySet() ); assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet()); From 6152afeef3ceb3fe4f18f6be2d55ac60256bc2db Mon Sep 17 00:00:00 2001 From: Daniel Widdis Date: Sat, 24 Aug 2024 09:54:11 -0700 Subject: [PATCH 11/17] Fix unchecked cast in dynamic action map getter (#15394) Signed-off-by: Daniel Widdis --- CHANGELOG.md | 1 + .../src/main/java/org/opensearch/action/ActionModule.java | 7 +++++-- .../org/opensearch/action/DynamicActionRegistryTests.java | 5 +++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 387aaf94bbe5c..deea2778dedd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) - Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) +- Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394)) ### Security diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 574b7029a6501..c86e6580122d5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -1200,9 +1200,12 @@ public void unregisterDynamicRoute(NamedRoute route) { * @param route The {@link RestHandler.Route}. * @return the corresponding {@link RestSendToExtensionAction} if it is registered, null otherwise. 
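* Note: only {@link NamedRoute} instances are ever registered as keys in the route registry, so a plain route argument can safely resolve to {@code null} without the unchecked cast removed below.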
*/ - @SuppressWarnings("unchecked") public RestSendToExtensionAction get(RestHandler.Route route) { - return routeRegistry.get(route); + if (route instanceof NamedRoute) { + return routeRegistry.get((NamedRoute) route); + } + // Only NamedRoutes are map keys so any other route is not in the map + return null; } } } diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index 20c2b1f17124c..1e2b29287acb3 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -20,6 +20,7 @@ import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -85,13 +86,17 @@ public void testDynamicActionRegistryWithNamedRoutes() { RestSendToExtensionAction action2 = mock(RestSendToExtensionAction.class); NamedRoute r1 = new NamedRoute.Builder().method(RestRequest.Method.GET).path("/foo").uniqueName("foo").build(); NamedRoute r2 = new NamedRoute.Builder().method(RestRequest.Method.PUT).path("/bar").uniqueName("bar").build(); + RestHandler.Route r3 = new RestHandler.Route(RestRequest.Method.DELETE, "/foo"); DynamicActionRegistry registry = new DynamicActionRegistry(); registry.registerDynamicRoute(r1, action); registry.registerDynamicRoute(r2, action2); assertTrue(registry.isActionRegistered("foo")); + assertEquals(action, registry.get(r1)); assertTrue(registry.isActionRegistered("bar")); + assertEquals(action2, registry.get(r2)); + assertNull(registry.get(r3)); registry.unregisterDynamicRoute(r2); From 689cfcac8367e289ad67cbcfe91ebd75d50f07d0 Mon Sep 17 00:00:00 2001 From: Shivansh Arora Date: Mon, 26 Aug 2024 00:27:51 +0530 Subject: [PATCH 12/17] [Remote State] Disable remote publication if remote state disabled (#15219) * Disable remote publication if remote state disabled Signed-off-by: Shivansh Arora --- .../remote/RemoteStatePublicationIT.java | 24 ++++++++++++++++--- .../coordination/CoordinationState.java | 3 ++- .../coordination/CoordinationStateTests.java | 14 ++++++++++- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java index 07d6e1379ced8..6a2e7ce4957ae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -50,15 +50,22 @@ public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { private static String INDEX_NAME = "test-index"; + private boolean isRemoteStateEnabled = true; + private String isRemotePublicationEnabled = "true"; @Before public void setup() { asyncUploadMockFsRepo = false; + isRemoteStateEnabled = true; + isRemotePublicationEnabled = "true"; } @Override protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + return Settings.builder() + .put(super.featureFlagSettings()) + 
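// isRemotePublicationEnabled below toggles the REMOTE_PUBLICATION_EXPERIMENTAL feature flag, while + // isRemoteStateEnabled drives REMOTE_CLUSTER_STATE_ENABLED_SETTING in nodeSettings, letting the test + // exercise the inconsistent combination of publication enabled with remote state disabled +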
.put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled) + .build(); } @Override @@ -76,7 +83,7 @@ protected Settings nodeSettings(int nodeOrdinal) { ); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled) .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) @@ -136,6 +143,18 @@ public void testPublication() throws Exception { } } + public void testRemotePublicationDisableIfRemoteStateDisabled() { + // only disable remote state + isRemoteStateEnabled = false; + // create a multi-node cluster with inconsistent settings + prepareCluster(3, 2, INDEX_NAME, 1, 2); + // assert cluster is stable, ensuring publication falls back to legacy transport with inconsistent settings + ensureStableCluster(5); + ensureGreen(INDEX_NAME); + + assertNull(internalCluster().getCurrentClusterManagerNodeInstance(RemoteClusterStateService.class)); + } + private Map<String, Integer> getMetadataFiles(BlobStoreRepository repository, String subDirectory) throws IOException { BlobPath metadataPath = repository.basePath() .add( @@ -151,5 +170,4 @@ private Map<String, Integer> getMetadataFiles(BlobStoreRepository repository, St return fileName.split(DELIMITER)[0]; }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum)); } - } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 7fa63ae8abc62..c7820c2c9a365 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -105,7 +105,8 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); - this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isRemotePublicationEnabled = isRemoteStateEnabled + && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && localNode.isRemoteStatePublicationEnabled(); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index e74962dcbba1b..3ee2278aec546 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -66,6 +66,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -971,7 +973,7 @@ public void 
testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) .put(stateRepoTypeAttributeKey, FsRepository.TYPE) .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") - .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); @@ -1002,6 +1004,16 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep ); } + public void testIsRemotePublicationEnabled_WithInconsistentSettings() { + // create settings with remote state disabled but publication enabled + Settings settings = Settings.builder() + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) + .put(REMOTE_PUBLICATION_EXPERIMENTAL, true) + .build(); + CoordinationState coordinationState = createCoordinationState(psr1, node1, settings); + assertFalse(coordinationState.isRemotePublicationEnabled()); + } + public static CoordinationState createCoordinationState( PersistedStateRegistry persistedStateRegistry, DiscoveryNode localNode, From f19528573dc3a16e4417550d40374afd471f8997 Mon Sep 17 00:00:00 2001 From: Dmitry Kryukov Date: Mon, 26 Aug 2024 02:22:04 +0300 Subject: [PATCH 13/17] Compare numbers with equals() instead of == (#15366) Signed-off-by: Dmitry Kryukov --- .../java/org/opensearch/transport/nio/MockNioTransport.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java index cd6bf02efef6f..9956c651618d3 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java @@ -467,7 +467,7 @@ private void logLongRunningExecutions() { final Thread thread = entry.getKey(); final String stackTrace = Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n")); final Thread.State threadState = thread.getState(); - if (blockedSinceInNanos == registry.get(thread)) { + if (blockedSinceInNanos.equals(registry.get(thread))) { logger.warn( "Potentially blocked execution on network thread [{}] [{}] [{} milliseconds]: \n{}", thread.getName(), From 90148942a56fa6a4840ad2afed195071f2d3c8e6 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 26 Aug 2024 21:46:18 +0530 Subject: [PATCH 14/17] Add pinned timestamp utils and setting to enable/disable the feature (#15401) Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../RemoteStorePinnedTimestampsIT.java | 10 + .../common/settings/ClusterSettings.java | 1 + .../index/remote/RemoteStoreUtils.java | 176 ++++++ .../store/RemoteSegmentStoreDirectory.java | 5 + .../indices/RemoteStoreSettings.java | 15 + .../main/java/org/opensearch/node/Node.java | 3 +- .../index/remote/RemoteStoreUtilsTests.java | 545 ++++++++++++++++++ 7 files changed, 754 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java index 05ff738d2df0b..cb91c63e17245 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -9,8 +9,10 @@ package org.opensearch.remotestore; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.test.OpenSearchIntegTestCase; @@ -20,6 +22,14 @@ public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + } + ActionListener<Void> noOpActionListener = new ActionListener<>() { @Override public void onResponse(Void unused) {} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 49ef87838ed2e..8daf9125bb27e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -761,6 +761,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index a5e0c10f72301..b2bc8a0294a49 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -20,20 +20,27 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; @@ -373,4 +380,173 @@ public static boolean isSwitchToStrictCompatibilityMode(ClusterUpdateSettingsReq incomingSettings ) == RemoteStoreNodeService.CompatibilityMode.STRICT; } + + /** + * Determines and returns a set of metadata files that match the provided pinned timestamps. + * + * This method is an overloaded version of getPinnedTimestampLockedFiles and does not use cached entries to find + * the metadata files. + * + * @param metadataFiles A list of metadata file names. 
Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + */ + public static Set<String> getPinnedTimestampLockedFiles( + List<String> metadataFiles, + Set<Long> pinnedTimestampSet, + Function<String, Long> getTimestampFunction, + Function<String, Tuple<String, String>> prefixFunction + ) { + return getPinnedTimestampLockedFiles(metadataFiles, pinnedTimestampSet, new HashMap<>(), getTimestampFunction, prefixFunction); + } + + /** + * Determines and returns a set of metadata files that match the provided pinned timestamps. If the pinned timestamp + * feature is not enabled, this function is a no-op. + * + * This method identifies metadata files that are considered implicitly locked due to their timestamps + * matching or being the closest preceding timestamp to the pinned timestamps. It uses a caching mechanism + * to improve performance for previously processed timestamps. + * + * The method performs the following steps: + * 1. Validates input parameters. + * 2. Updates the cache (metadataFilePinnedTimestampMap) to remove outdated entries. + * 3. Processes cached entries and identifies new timestamps to process. + * 4. For new timestamps, iterates through metadata files to find matching or closest preceding files. + * 5. Updates the cache with newly processed timestamps and their corresponding metadata files. + * + * @param metadataFiles A list of metadata file names. Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param metadataFilePinnedTimestampMap A map used for caching processed timestamps and their corresponding metadata files. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + * + */ + public static Set<String> getPinnedTimestampLockedFiles( + List<String> metadataFiles, + Set<Long> pinnedTimestampSet, + Map<Long, String> metadataFilePinnedTimestampMap, + Function<String, Long> getTimestampFunction, + Function<String, Tuple<String, String>> prefixFunction + ) { + Set<String> implicitLockedFiles = new HashSet<>(); + + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return implicitLockedFiles; + } + + if (metadataFiles == null || metadataFiles.isEmpty() || pinnedTimestampSet == null) { + return implicitLockedFiles; + } + + // Remove entries for timestamps that are no longer pinned + metadataFilePinnedTimestampMap.keySet().retainAll(pinnedTimestampSet); + + // Add cached entries and collect new timestamps + Set<Long> newPinnedTimestamps = new TreeSet<>(Collections.reverseOrder()); + for (Long pinnedTimestamp : pinnedTimestampSet) { + String cachedFile = metadataFilePinnedTimestampMap.get(pinnedTimestamp); + if (cachedFile != null) { + implicitLockedFiles.add(cachedFile); + } else { + newPinnedTimestamps.add(pinnedTimestamp); + } + } + + if (newPinnedTimestamps.isEmpty()) { + return implicitLockedFiles; + } + + // Sort metadata files in descending order of timestamp + // ToDo: Do we really need this? Files fetched from remote store are already lexicographically sorted. 
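+ // Worked example (editorial, not in the original patch): with metadata files at timestamps + // [40, 30, 20] walked newest-first and pinned timestamps {35, 20}, the pin at 35 falls between + // 40 and 30, so the file at 30 is the closest one at or before the pin and gets locked; the pin + // at 20 matches the file at 20 exactly, which gets locked as well.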
+ metadataFiles.sort(String::compareTo); + + // If we have metadata files from multiple writers, it can result in picking a file generated by a stale primary. + // To avoid this, we fail fast. + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, prefixFunction); + + Iterator<Long> timestampIterator = newPinnedTimestamps.iterator(); + Long currentPinnedTimestamp = timestampIterator.next(); + long prevMdTimestamp = Long.MAX_VALUE; + for (String metadataFileName : metadataFiles) { + long currentMdTimestamp = getTimestampFunction.apply(metadataFileName); + // We always prefer the md file with higher prefix values (primary term, generation, etc.) + if (currentMdTimestamp > prevMdTimestamp) { + continue; + } + while (currentMdTimestamp <= currentPinnedTimestamp && prevMdTimestamp > currentPinnedTimestamp) { + implicitLockedFiles.add(metadataFileName); + // Do not cache entry for latest metadata file as the next metadata can also match the same pinned timestamp + if (prevMdTimestamp != Long.MAX_VALUE) { + metadataFilePinnedTimestampMap.put(currentPinnedTimestamp, metadataFileName); + } + if (timestampIterator.hasNext() == false) { + return implicitLockedFiles; + } + currentPinnedTimestamp = timestampIterator.next(); + } + prevMdTimestamp = currentMdTimestamp; + } + + return implicitLockedFiles; + } + + /** + * Filters out metadata files based on their age and pinned timestamps settings. + * + * This method filters a list of metadata files, keeping only those that are older + * than a certain threshold determined by the last successful fetch of pinned timestamps + * and a configured lookback interval. + * + * @param metadataFiles A list of metadata file names to be filtered. + * @param getTimestampFunction A function that extracts a timestamp from a metadata file name. + * @param lastSuccessfulFetchOfPinnedTimestamps The timestamp of the last successful fetch of pinned timestamps. + * @return A new list containing only the metadata files that meet the age criteria. + * If pinned timestamps are not enabled, returns a copy of the input list. + */ + public static List<String> filterOutMetadataFilesBasedOnAge( + List<String> metadataFiles, + Function<String, Long> getTimestampFunction, + long lastSuccessfulFetchOfPinnedTimestamps + ) { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return new ArrayList<>(metadataFiles); + } + long maximumAllowedTimestamp = lastSuccessfulFetchOfPinnedTimestamps - RemoteStoreSettings.getPinnedTimestampsLookbackInterval() + .getMillis(); + List<String> metadataFilesWithMinAge = new ArrayList<>(); + for (String metadataFileName : metadataFiles) { + long metadataTimestamp = getTimestampFunction.apply(metadataFileName); + if (metadataTimestamp < maximumAllowedTimestamp) { + metadataFilesWithMinAge.add(metadataFileName); + } + } + return metadataFilesWithMinAge; + } + + /** + * Determines if the pinned timestamp state is stale. + * + * This method checks whether the last successful fetch of pinned timestamps + * is considered stale based on the current time and configured intervals. + * The state is considered stale if the last successful fetch occurred before + * a certain threshold, which is calculated as three times the scheduler interval + * plus the lookback interval. + * + * @return true if the pinned timestamp state is stale, false otherwise. + * Always returns false if pinned timestamps are not enabled. 
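+ * For example, with an illustrative 3-minute scheduler interval and 1-minute lookback, the state + * would be treated as stale once the last successful fetch is more than 10 minutes old.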
+
+    /**
+     * Determines if the pinned timestamp state is stale.
+     *
+     * This method checks whether the last successful fetch of pinned timestamps
+     * is considered stale based on the current time and configured intervals.
+     * The state is considered stale if the last successful fetch occurred before
+     * a certain threshold, which is calculated as three times the scheduler interval
+     * plus the lookback interval.
+     *
+     * @return true if the pinned timestamp state is stale, false otherwise.
+     *         Always returns false if pinned timestamps are not enabled.
+     */
+    public static boolean isPinnedTimestampStateStale() {
+        if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) {
+            return false;
+        }
+        long lastSuccessfulFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1();
+        long staleBufferInMillis = (RemoteStoreSettings.getPinnedTimestampsSchedulerInterval().millis() * 3) + RemoteStoreSettings
+            .getPinnedTimestampsLookbackInterval()
+            .millis();
+        return lastSuccessfulFetchTimestamp < (System.currentTimeMillis() - staleBufferInMillis);
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index 8c0ecb4cc783a..9ff97f12015bd 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -349,6 +349,11 @@ static long getGeneration(String[] filenameTokens) {
             return RemoteStoreUtils.invertLong(filenameTokens[2]);
         }
 
+        public static long getTimestamp(String filename) {
+            String[] filenameTokens = filename.split(SEPARATOR);
+            return RemoteStoreUtils.invertLong(filenameTokens[6]);
+        }
+
         public static Tuple<String, String> getNodeIdByPrimaryTermAndGen(String filename) {
             String[] tokens = filename.split(SEPARATOR);
             if (tokens.length < 8) {
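[Editor's note: segment metadata file names embed each numeric token through RemoteStoreUtils.invertLong (a zero-padded Long.MAX_VALUE minus the value, per the existing utility), so ascending lexicographic order is descending chronological order, and token 6 is the upload timestamp. The sketch below mirrors the fabricated names used by the tests later in this patch; illustration only.]

    // A larger timestamp yields a lexicographically *smaller* inverted string.
    String name = "metadata__1__2__3__4__5__" + RemoteStoreUtils.invertLong(990L);
    long ts = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(name); // 990
    assert RemoteStoreUtils.invertLong(990L).compareTo(RemoteStoreUtils.invertLong(1000L)) > 0;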
diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
index 495288626627b..55280ca5c96d6 100644
--- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
+++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java
@@ -134,6 +134,15 @@ public class RemoteStoreSettings {
         Property.Dynamic
     );
 
+    /**
+     * Controls pinned timestamp feature enablement
+     */
+    public static final Setting<Boolean> CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED = Setting.boolSetting(
+        "cluster.remote_store.pinned_timestamps.enabled",
+        false,
+        Setting.Property.NodeScope
+    );
+
     /**
      * Controls pinned timestamp scheduler interval
      */
@@ -163,6 +172,7 @@ public class RemoteStoreSettings {
     private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm;
     private volatile int maxRemoteTranslogReaders;
     private volatile boolean isTranslogMetadataEnabled;
+    private static volatile boolean isPinnedTimestampsEnabled;
     private static volatile TimeValue pinnedTimestampsSchedulerInterval;
     private static volatile TimeValue pinnedTimestampsLookbackInterval;
 
@@ -205,6 +215,7 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) {
 
         pinnedTimestampsSchedulerInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL.get(settings);
         pinnedTimestampsLookbackInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL.get(settings);
+        isPinnedTimestampsEnabled = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings);
     }
 
     public TimeValue getClusterRemoteTranslogBufferInterval() {
@@ -280,4 +291,8 @@ public static TimeValue getPinnedTimestampsSchedulerInterval() {
     public static TimeValue getPinnedTimestampsLookbackInterval() {
         return pinnedTimestampsLookbackInterval;
     }
+
+    public static boolean isPinnedTimestampsEnabled() {
+        return isPinnedTimestampsEnabled;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index 1a9b233b387b2..7e867d3966ff5 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -305,6 +305,7 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY;
 import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath;
 import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY;
+import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled;
 
@@ -812,7 +813,7 @@ protected Node(
                 remoteClusterStateCleanupManager = null;
             }
             final RemoteStorePinnedTimestampService remoteStorePinnedTimestampService;
-            if (isRemoteStoreAttributePresent(settings)) {
+            if (isRemoteStoreAttributePresent(settings) && CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings)) {
                 remoteStorePinnedTimestampService = new RemoteStorePinnedTimestampService(
                     repositoriesServiceReference::get,
                     settings,
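[Editor's note: a minimal sketch of the new gate in Node, illustration only. The pinned-timestamp service is now constructed only when the node-scoped flag is set in addition to the usual remote store node attributes, which are elided here because they vary by deployment.]

    Settings settings = Settings.builder()
        .put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) // "cluster.remote_store.pinned_timestamps.enabled"
        /* plus the remote store repository node attributes */
        .build();
    assert CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings);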
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java
index ec48032df4a15..ceaee8337ae34 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java
@@ -25,23 +25,29 @@
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.support.PlainBlobMetadata;
+import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.shard.IndexShardTestUtils;
 import org.opensearch.index.store.RemoteSegmentStoreDirectory;
 import org.opensearch.index.translog.transfer.TranslogTransferMetadata;
+import org.opensearch.indices.RemoteStoreSettings;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.math.BigInteger;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -60,6 +66,7 @@ import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX;
 import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR;
 import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR;
+import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
@@ -537,4 +544,542 @@ private Map<String, String> getRemoteStoreNodeAttributes() {
         remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1");
         return remoteStoreNodeAttributes;
     }
+
+    private void setupRemotePinnedTimestampFeature(boolean enabled) {
+        // Constructing RemoteStoreSettings is sufficient here: its constructor populates
+        // the static isPinnedTimestampsEnabled flag that RemoteStoreUtils consults.
+        RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(
+            Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), enabled).build(),
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+        );
+    }
+
+    public void testGetPinnedTimestampLockedFilesFeatureDisabled() {
+        setupRemotePinnedTimestampFeature(false);
+        // Pinned timestamps 800, 900, 1000, 2000
+        // Metadata with timestamp 990, 995, 1000, 1001
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1001 <= Pinned Timestamp 2000
+        Map<Long, String> metadataFilePinnedTimestampCache = new HashMap<>();
+        Tuple<Map<Long, String>, Set<String>> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L),
+            Set.of(800L, 900L, 1000L, 2000L),
+            metadataFilePinnedTimestampCache
+        );
+        Map<Long, String> metadataFiles = metadataAndLocks.v1();
+        Set<String> implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(0, implicitLockedFiles.size());
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithEmptyMetadataFiles() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = Collections.emptyList();
+        Set<Long> pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L));
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithNoPinnedTimestamps() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt");
+        Set<Long> pinnedTimestampSet = Collections.emptySet();
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithNullMetadataFiles() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = null;
+        Set<Long> pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L));
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithNullPinnedTimestampSet() {
+        setupRemotePinnedTimestampFeature(true);
+        List<String> metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt");
+        Set<Long> pinnedTimestampSet = null;
+        Set<String> implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles(
+            metadataFiles,
+            pinnedTimestampSet,
+            new HashMap<>(),
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp,
+            RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
+        );
+        assertTrue(implicitLockedFiles.isEmpty());
+    }
String metadataPrefix = "metadata__1__2__3__4__5__"; + Map metadataFiles = new HashMap<>(); + for (Long metadataFileTimestamp : metadataFileTimestamps) { + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + } + return new Tuple<>( + metadataFiles, + RemoteStoreUtils.getPinnedTimestampLockedFiles( + new ArrayList<>(metadataFiles.values()), + pinnedTimetamps, + metadataFilePinnedTimestampCache, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ) + ); + } + + private Tuple, Set> testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map metadataFileTimestampsPrimaryTermMap, + Set pinnedTimetamps, + Map metadataFilePinnedTimestampCache + ) { + setupRemotePinnedTimestampFeature(true); + Map metadataFiles = new HashMap<>(); + for (Map.Entry metadataFileTimestampPrimaryTerm : metadataFileTimestampsPrimaryTermMap.entrySet()) { + String primaryTerm = RemoteStoreUtils.invertLong(metadataFileTimestampPrimaryTerm.getValue()); + String metadataPrefix = "metadata__" + primaryTerm + "__2__3__4__5__"; + long metadataFileTimestamp = metadataFileTimestampPrimaryTerm.getKey(); + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + } + return new Tuple<>( + metadataFiles, + RemoteStoreUtils.getPinnedTimestampLockedFiles( + new ArrayList<>(metadataFiles.values()), + pinnedTimetamps, + metadataFilePinnedTimestampCache, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ) + ); + } + + public void testGetPinnedTimestampLockedFilesWithPinnedTimestamps() { + setupRemotePinnedTimestampFeature(true); + + Map metadataFilePinnedTimestampCache = new HashMap<>(); + + // Pinned timestamps 800, 900 + // Metadata with timestamp 990 + // No metadata matches the timestamp + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L), + Set.of(800L, 900L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(0, implicitLockedFiles.size()); + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990 + // Metadata timestamp 990 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(990L))); + // This is still 0 as we don't cache the latest metadata file as it can change (explained in the next test case) + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990, 995 + // Metadata timestamp 995 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, 
+
+    public void testGetPinnedTimestampLockedFilesWithPinnedTimestamps() {
+        setupRemotePinnedTimestampFeature(true);
+
+        Map<Long, String> metadataFilePinnedTimestampCache = new HashMap<>();
+
+        // Pinned timestamps 800, 900
+        // Metadata with timestamp 990
+        // No metadata matches the timestamp
+        Tuple<Map<Long, String>, Set<String>> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L),
+            Set.of(800L, 900L),
+            metadataFilePinnedTimestampCache
+        );
+        Map<Long, String> metadataFiles = metadataAndLocks.v1();
+        Set<String> implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(0, implicitLockedFiles.size());
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000
+        // Metadata with timestamp 990
+        // Metadata timestamp 990 <= Pinned Timestamp 1000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L),
+            Set.of(800L, 900L, 1000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(990L)));
+        // This is still 0 as we don't cache the latest metadata file as it can change (explained in the next test case)
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000
+        // Metadata with timestamp 990, 995
+        // Metadata timestamp 995 <= Pinned Timestamp 1000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L),
+            Set.of(800L, 900L, 1000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(995L)));
+        // This is still 0 as we don't cache the latest metadata file as it can change
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000
+        // Metadata with timestamp 990, 995, 1000
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L),
+            Set.of(800L, 900L, 1000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        // This is still 0 as we don't cache the latest metadata file as it can change
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+
+        // Pinned timestamps 800, 900, 1000, 2000
+        // Metadata with timestamp 990, 995, 1000, 1001
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1001 <= Pinned Timestamp 2000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L),
+            Set.of(800L, 900L, 1000L, 2000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L));
+
+        // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000
+        // Metadata with timestamp 990, 995, 1000, 1001
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1001 <= Pinned Timestamp 2000
+        // Metadata timestamp 1001 <= Pinned Timestamp 3000
+        // Metadata timestamp 1001 <= Pinned Timestamp 4000
+        // Metadata timestamp 1001 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L),
+            Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L));
+
+        // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000
+        // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300
+        // Metadata timestamp 1000 <= Pinned Timestamp 1000
+        // Metadata timestamp 1900 <= Pinned Timestamp 2000
+        // Metadata timestamp 2300 <= Pinned Timestamp 3000
+        // Metadata timestamp 2300 <= Pinned Timestamp 4000
+        // Metadata timestamp 2300 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L),
+            Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(3, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(2, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L));
+        assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L));
+
+        // Pinned timestamps 2000, 3000, 4000, 5000
+        // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300
+        // Metadata timestamp 1900 <= Pinned Timestamp 2000
+        // Metadata timestamp 2300 <= Pinned Timestamp 3000
+        // Metadata timestamp 2300 <= Pinned Timestamp 4000
+        // Metadata timestamp 2300 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L),
+            Set.of(2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L));
+
+        // Pinned timestamps 2000, 3000, 4000, 5000
+        // Metadata with timestamp 1001, 1900, 2300, 3000, 3001, 5500, 6000
+        // Metadata timestamp 1900 <= Pinned Timestamp 2000
+        // Metadata timestamp 3000 <= Pinned Timestamp 3000
+        // Metadata timestamp 3001 <= Pinned Timestamp 4000
+        // Metadata timestamp 3001 <= Pinned Timestamp 5000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(1001L, 1900L, 2300L, 3000L, 3001L, 5500L, 6000L),
+            Set.of(2000L, 3000L, 4000L, 5000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(3, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(3000L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(4, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L));
+        assertEquals(metadataFiles.get(3000L), metadataFilePinnedTimestampCache.get(3000L));
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L));
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L));
+
+        // Pinned timestamps 4000, 5000, 6000, 7000
+        // Metadata with timestamp 2300, 3000, 3001, 5500, 6000
+        // Metadata timestamp 3001 <= Pinned Timestamp 4000
+        // Metadata timestamp 3001 <= Pinned Timestamp 5000
+        // Metadata timestamp 6000 <= Pinned Timestamp 6000
+        // Metadata timestamp 6000 <= Pinned Timestamp 7000
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            List.of(2300L, 3000L, 3001L, 5500L, 6000L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(2, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L)));
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6000L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(2, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L));
+        assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L));
+    }
+
+    public void testGetPinnedTimestampLockedFilesWithPinnedTimestampsDifferentPrefix() {
+        setupRemotePinnedTimestampFeature(true);
+
+        Map<Long, String> metadataFilePinnedTimestampCache = new HashMap<>();
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 6 - 6998
+        // 5 - 6995
+        // 5 - 6990
+        Tuple<Map<Long, String>, Set<String>> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 6998L, 6L, 6995L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        Map<Long, String> metadataFiles = metadataAndLocks.v1();
+        Set<String> implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 5 - 6998
+        // 5 - 6995
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 6998L, 5L, 6995L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 6 - 6998
+        // 5 - 7001
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 6998L, 6L, 7001L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 7002
+        // 5 - 7005
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(7002L, 6L, 7005L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6990L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(1, metadataFilePinnedTimestampCache.size());
+        assertEquals(metadataFiles.get(6990L), metadataFilePinnedTimestampCache.get(7000L));
+
+        // Pinned timestamp 7000
+        // Primary Term - Timestamp in md file
+        // 6 - 6999
+        // 5 - 7005
+        // 5 - 6990
+        metadataFilePinnedTimestampCache = new HashMap<>();
+        metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps(
+            Map.of(6999L, 6L, 7005L, 5L, 6990L, 5L),
+            Set.of(4000L, 5000L, 6000L, 7000L),
+            metadataFilePinnedTimestampCache
+        );
+        metadataFiles = metadataAndLocks.v1();
+        implicitLockedFiles = metadataAndLocks.v2();
+
+        assertEquals(1, implicitLockedFiles.size());
+        assertTrue(implicitLockedFiles.contains(metadataFiles.get(6999L)));
+        // Now we cache all the matches except the last one.
+        assertEquals(0, metadataFilePinnedTimestampCache.size());
+    }
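[Editor's note: the DifferentPrefix cases above exercise the `currentMdTimestamp > prevMdTimestamp` skip in getPinnedTimestampLockedFiles: after sorting, a file from a higher primary term sorts first, so out-of-order timestamps from a stale primary are simply skipped. Genuinely concurrent writers are instead rejected up front by the existing verifyNoMultipleWriters call, which per the comment in the patch fails fast. A sketch of that call, illustration only:]

    // Fails fast if the node-id prefix extracted from the names shows that two
    // different nodes uploaded overlapping metadata generations.
    RemoteStoreUtils.verifyNoMultipleWriters(
        metadataFiles,                      // assumed list of metadata file names
        RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen
    );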
+
+    public void testFilterOutMetadataFilesBasedOnAgeFeatureDisabled() {
+        setupRemotePinnedTimestampFeature(false);
+        List<String> metadataFiles = new ArrayList<>();
+
+        for (int i = 0; i < randomIntBetween(5, 10); i++) {
+            metadataFiles.add((System.currentTimeMillis() - randomIntBetween(-150000, 150000)) + "_file" + i + ".txt");
+        }
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertEquals(metadataFiles, result);
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_AllFilesOldEnough() {
+        setupRemotePinnedTimestampFeature(true);
+
+        List<String> metadataFiles = Arrays.asList(
+            (System.currentTimeMillis() - 150000) + "_file1.txt",
+            (System.currentTimeMillis() - 300000) + "_file2.txt",
+            (System.currentTimeMillis() - 450000) + "_file3.txt"
+        );
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertEquals(metadataFiles, result);
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_SomeFilesTooNew() {
+        setupRemotePinnedTimestampFeature(true);
+
+        String file1 = (System.currentTimeMillis() - 150000) + "_file1.txt";
+        String file2 = (System.currentTimeMillis() - 300000) + "_file2.txt";
+        String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt";
+
+        List<String> metadataFiles = Arrays.asList(file1, file2, file3);
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        List<String> expected = Arrays.asList(file1, file2);
+        assertEquals(expected, result);
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_AllFilesTooNew() {
+        setupRemotePinnedTimestampFeature(true);
+
+        String file1 = (System.currentTimeMillis() + 150000) + "_file1.txt";
+        String file2 = (System.currentTimeMillis() + 300000) + "_file2.txt";
+        String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt";
+
+        List<String> metadataFiles = Arrays.asList(file1, file2, file3);
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertTrue(result.isEmpty());
+    }
+
+    public void testFilterOutMetadataFilesBasedOnAge_EmptyInputList() {
+        setupRemotePinnedTimestampFeature(true);
+
+        List<String> metadataFiles = Arrays.asList();
+
+        List<String> result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge(
+            metadataFiles,
+            file -> Long.valueOf(file.split("_")[0]),
+            System.currentTimeMillis()
+        );
+        assertTrue(result.isEmpty());
+    }
+
+    public void testIsPinnedTimestampStateStaleFeatureDisabled() {
+        setupRemotePinnedTimestampFeature(false);
+        assertFalse(RemoteStoreUtils.isPinnedTimestampStateStale());
+    }
+
+    public void testIsPinnedTimestampStateStaleFeatureEnabled() {
+        setupRemotePinnedTimestampFeature(true);
+        assertTrue(RemoteStoreUtils.isPinnedTimestampStateStale());
+    }
+
 }
From 6e701918d07b403d1b3d368c0fe0ce595f035380 Mon Sep 17 00:00:00 2001
From: Brandon Shien <44730413+bshien@users.noreply.github.com>
Date: Mon, 26 Aug 2024 11:48:53 -0700
Subject: [PATCH 15/17] Add release notes for release 1.3.19 (#15392)

Signed-off-by: Brandon Shien
---
 release-notes/opensearch.release-notes-1.3.19.md | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 release-notes/opensearch.release-notes-1.3.19.md

diff --git a/release-notes/opensearch.release-notes-1.3.19.md b/release-notes/opensearch.release-notes-1.3.19.md
new file mode 100644
index 0000000000000..fe62624fc6362
--- /dev/null
+++ b/release-notes/opensearch.release-notes-1.3.19.md
@@ -0,0 +1,5 @@
+## 2024-08-22 Version 1.3.19 Release Notes
+
+### Upgrades
+- OpenJDK Update (July 2024 Patch releases) ([#15002](https://github.com/opensearch-project/OpenSearch/pull/15002))
+- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
From f247d8fe1b88487554373495bd5f8f8fcc1f0147 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 26 Aug 2024 15:53:43 -0400
Subject: [PATCH 16/17] Bump tj-actions/changed-files from 44 to 45 (#15422)

* Bump tj-actions/changed-files from 44 to 45

Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 44 to 45.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](https://github.com/tj-actions/changed-files/compare/v44...v45)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]

* Update changelog

Signed-off-by: dependabot[bot]

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 .github/workflows/gradle-check.yml | 2 +-
 CHANGELOG.md                       | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index 89d894403ff1a..1b9b30625eb83 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -20,7 +20,7 @@ jobs:
       - uses: actions/checkout@v4
       - name: Get changed files
         id: changed-files-specific
-        uses: tj-actions/changed-files@v44
+        uses: tj-actions/changed-files@v45
         with:
           files_ignore: |
             release-notes/*.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index deea2778dedd2..2838437200db8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298))
 - Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361))
 - Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361))
+- Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422))
 
 ### Changed
 - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979))
From 46a269ef21e88e3cb1398474e4bf82af4c3b3b7f Mon Sep 17 00:00:00 2001
From: Michael Froh
Date: Mon, 26 Aug 2024 14:11:24 -0700
Subject: [PATCH 17/17] Do not throw exception when flat_object field is
 explicitly null (#15375)

It is valid for a flat_object field to have an explicit value of null.
(It's functionally the same as not specifying the field at all.)
Prior to this fix, though, we would erroneously advance the context parser
to the next token, violating the contract with DocumentParser (which says
that a call to parseCreateField with a null value should complete with the
parser still pointing at the null value -- it is DocumentParser's
responsibility to advance).

Signed-off-by: Michael Froh

* Fix unit test

Signed-off-by: Michael Froh

* Add changelog entry

Signed-off-by: Michael Froh

---------

Signed-off-by: Michael Froh
---
 CHANGELOG.md                                     |  1 +
 .../test/index/100_partial_flat_object.yml       | 13 +++++++++++--
 .../index/mapper/FlatObjectFieldMapper.java      |  6 +-----
 .../index/mapper/FlatObjectFieldMapperTests.java |  8 ++++----
 4 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2838437200db8..c04ddb6724d28 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620))
 - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194))
 - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233))
+- Fix indexing error when flat_object field is explicitly null ([#15375](https://github.com/opensearch-project/OpenSearch/pull/15375))
 - Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393))
 - Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394))

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml
index 91e4127da9c32..e1bc86f1c9f3d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml
@@ -88,7 +88,16 @@ setup:
           } ]
         }
       }
-
+  - do:
+      index:
+        index: test_partial_flat_object
+        id: 4
+        body: {
+          "issue": {
+            "number": 999,
+            "labels": null
+          }
+        }
   - do:
       indices.refresh:
         index: test_partial_flat_object
@@ -135,7 +144,7 @@ teardown:
         }
       }
 
-  - length: { hits.hits: 3 }
+  - length: { hits.hits: 4 }
 
 # Match Query with exact dot path.
   - do:
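[Editor's note: for concreteness, the document shape this fix accepts, built the way the new YAML test above indexes it. Illustration only; `issue` is a flat_object field in the assumed mapping, and the pre-fix error text is taken from the unit test that this patch removes.]

    // Now accepted (functionally identical to omitting "labels" entirely);
    // before this patch, indexing an explicit null raised a MapperParsingException
    // ("... tried to parse field [...] as object").
    XContentBuilder doc = XContentFactory.jsonBuilder()
        .startObject()
        .startObject("issue")
        .field("number", 999)
        .nullField("labels")     // explicit null on a flat_object field
        .endObject()
        .endObject();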
diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
index b82fa3999612a..bf8f83e1b95df 100644
--- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
@@ -568,11 +568,7 @@ protected void parseCreateField(ParseContext context) throws IOException {
         if (context.externalValueSet()) {
             String value = context.externalValue().toString();
             parseValueAddFields(context, value, fieldType().name());
-        } else if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) {
-            context.parser().nextToken(); // This triggers an exception in DocumentParser.
-            // We could remove the above nextToken() call to skip the null value, but the existing
-            // behavior (since 2.7) throws the exception.
-        } else {
+        } else if (context.parser().currentToken() != XContentParser.Token.VALUE_NULL) {
             JsonToStringXContentParser jsonToStringParser = new JsonToStringXContentParser(
                 NamedXContentRegistry.EMPTY,
                 DeprecationHandler.IGNORE_DEPRECATIONS,
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
index 637072c8886c1..5b5ca378ee7ff 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
@@ -25,7 +25,6 @@
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.core.IsEqual.equalTo;
-import static org.hamcrest.core.StringContains.containsString;
 
 public class FlatObjectFieldMapperTests extends MapperTestCase {
     private static final String FIELD_TYPE = "flat_object";
@@ -133,9 +132,10 @@ public void testDefaults() throws Exception {
 
     public void testNullValue() throws IOException {
         DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
-        MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.nullField("field"))));
-        assertThat(e.getMessage(), containsString("object mapping for [_doc] tried to parse field [field] as object"));
-
+        ParsedDocument parsedDocument = mapper.parse(source(b -> b.nullField("field")));
+        assertEquals(1, parsedDocument.docs().size());
+        IndexableField[] fields = parsedDocument.rootDoc().getFields("field");
+        assertEquals(0, fields.length);
     }
 
     @Override