From 5bce3f2cebce81504a4559d3d9f338a6e97f6542 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 18 Jan 2023 09:08:30 +0100 Subject: [PATCH 01/13] ActiveShardCount should wait for searchable shards to appear (#92980) --- .../action/support/ActiveShardCount.java | 16 ++-- .../routing/IndexShardRoutingTable.java | 40 +++++++-- .../action/support/ActiveShardCountTests.java | 83 +++++++++++++++++-- 3 files changed, 117 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java b/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java index 3ed83fb1c4e5d..34f0ce1a2fe2e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java +++ b/server/src/main/java/org/elasticsearch/action/support/ActiveShardCount.java @@ -153,8 +153,7 @@ public boolean enoughShardsActive(final ClusterState clusterState, final String. waitForActiveShards = SETTING_WAIT_FOR_ACTIVE_SHARDS.get(indexMetadata.getSettings()); } for (int i = 0; i < indexRoutingTable.size(); i++) { - IndexShardRoutingTable shardRouting = indexRoutingTable.shard(i); - if (waitForActiveShards.enoughShardsActive(shardRouting) == false) { + if (waitForActiveShards.enoughShardsActive(indexRoutingTable.shard(i)) == false) { // not enough active shard copies yet return false; } @@ -171,13 +170,13 @@ public boolean enoughShardsActive(final ClusterState clusterState, final String. public boolean enoughShardsActive(final IndexShardRoutingTable shardRoutingTable) { final int activeShardCount = shardRoutingTable.activeShards().size(); if (this == ActiveShardCount.ALL) { - // adding 1 for the primary in addition to the total number of replicas, - // which gives us the total number of shard copies - return activeShardCount == shardRoutingTable.replicaShards().size() + 1; - } else if (this == ActiveShardCount.DEFAULT) { - return activeShardCount >= 1; + return activeShardCount == shardRoutingTable.size(); + } else if (value == 0) { + return true; + } else if (value == 1) { + return shardRoutingTable.hasSearchShards() ? shardRoutingTable.getActiveSearchShardCount() >= 1 : activeShardCount >= 1; } else { - return activeShardCount >= value; + return shardRoutingTable.getActiveSearchShardCount() >= value; } } @@ -189,5 +188,4 @@ public String toString() { default -> Integer.toString(value); }; } - } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 3fc69ceb7b826..0dd85d873463d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -45,23 +45,23 @@ public class IndexShardRoutingTable { final ShardShuffler shuffler; final ShardId shardId; - + final ShardRouting[] shards; final ShardRouting primary; final List replicas; - final ShardRouting[] shards; final List activeShards; final List assignedShards; - final boolean allShardsStarted; - /** * The initializing list, including ones that are initializing on a target node because of relocation. * If we can come up with a better variable name, it would be nice... 
*/ final List allInitializingShards; + final boolean allShardsStarted; + final int activeSearchShardCount; + final int totalSearchShardCount; IndexShardRoutingTable(ShardId shardId, List shards) { - this.shardId = shardId; this.shuffler = new RotationShardShuffler(Randomness.get().nextInt()); + this.shardId = shardId; this.shards = shards.toArray(ShardRouting[]::new); ShardRouting primary = null; @@ -70,6 +70,8 @@ public class IndexShardRoutingTable { List assignedShards = new ArrayList<>(); List allInitializingShards = new ArrayList<>(); boolean allShardsStarted = true; + int activeSearchShardCount = 0; + int totalSearchShardCount = 0; for (ShardRouting shard : this.shards) { if (shard.primary()) { assert primary == null : "duplicate primary: " + primary + " vs " + shard; @@ -79,6 +81,12 @@ public class IndexShardRoutingTable { } if (shard.active()) { activeShards.add(shard); + if (shard.role().isSearchable()) { + activeSearchShardCount++; + } + } + if (shard.role().isSearchable()) { + totalSearchShardCount++; } if (shard.initializing()) { allInitializingShards.add(shard); @@ -97,12 +105,14 @@ public class IndexShardRoutingTable { allShardsStarted = false; } } - this.allShardsStarted = allShardsStarted; this.primary = primary; this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas); this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards); this.assignedShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(assignedShards); this.allInitializingShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(allInitializingShards); + this.allShardsStarted = allShardsStarted; + this.activeSearchShardCount = activeSearchShardCount; + this.totalSearchShardCount = totalSearchShardCount; } /** @@ -461,6 +471,24 @@ public boolean allShardsStarted() { return allShardsStarted; } + /** + * @return the count of active searchable shards + */ + public int getActiveSearchShardCount() { + return activeSearchShardCount; + } + + /** + * @return the total count of searchable shards + */ + public int getTotalSearchShardCount() { + return totalSearchShardCount; + } + + public boolean hasSearchShards() { + return totalSearchShardCount > 0; + } + @Nullable public ShardRouting getByAllocationId(String allocationId) { for (ShardRouting shardRouting : assignedShards()) { diff --git a/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java b/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java index e82e1d3fac5fc..aab2e13999513 100644 --- a/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -102,6 +103,73 @@ public void testEnoughShardsActiveLevelDefault() { runTestForOneActiveShard(ActiveShardCount.DEFAULT); } + public void testEnoughShardsActiveLevelDefaultWithSearchOnlyRole() { + final String indexName = "test-idx"; + final int numberOfShards = randomIntBetween(1, 5); + final int numberOfReplicas = randomIntBetween(4, 7); + final ActiveShardCount waitForActiveShards = 
ActiveShardCount.DEFAULT; + ClusterState clusterState = initializeWithNewIndex(indexName, numberOfShards, numberOfReplicas, createCustomRoleStrategy(1)); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startPrimaries(clusterState, indexName); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startLessThanWaitOnShards(clusterState, indexName, 1); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startAllShards(clusterState, indexName); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + } + + public void testEnoughShardsActiveCustomLevelWithSearchOnlyRole() { + final String indexName = "test-idx"; + final int numberOfShards = randomIntBetween(1, 5); + final int numberOfReplicas = randomIntBetween(4, 7); + final int activeShardCount = randomIntBetween(2, numberOfReplicas); + final ActiveShardCount waitForActiveShards = ActiveShardCount.from(activeShardCount); + ClusterState clusterState = initializeWithNewIndex(indexName, numberOfShards, numberOfReplicas, createCustomRoleStrategy(1)); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startPrimaries(clusterState, indexName); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startLessThanWaitOnShards(clusterState, indexName, activeShardCount - 2); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startWaitOnShards(clusterState, indexName, activeShardCount); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startAllShards(clusterState, indexName); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + } + + public void testEnoughShardsActiveWithNoSearchOnlyRoles() { + final String indexName = "test-idx"; + final int numberOfShards = randomIntBetween(1, 5); + final int numberOfReplicas = randomIntBetween(4, 7); + final ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + ClusterState clusterState = initializeWithNewIndex( + indexName, + numberOfShards, + numberOfReplicas, + createCustomRoleStrategy(numberOfReplicas + 1) + ); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startPrimaries(clusterState, indexName); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startLessThanWaitOnShards(clusterState, indexName, 1); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startAllShards(clusterState, indexName); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + } + + private static ShardRoutingRoleStrategy createCustomRoleStrategy(int indexShardCount) { + return new ShardRoutingRoleStrategy() { + @Override + public ShardRouting.Role newEmptyRole(int copyIndex) { + return copyIndex < indexShardCount ? 
ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.SEARCH_ONLY; + } + + @Override + public ShardRouting.Role newReplicaRole() { + return ShardRouting.Role.SEARCH_ONLY; + } + }; + } + public void testEnoughShardsActiveRandom() { final String indexName = "test-idx"; final int numberOfShards = randomIntBetween(1, 5); @@ -166,12 +234,11 @@ public void testEnoughShardsActiveWithClosedIndex() { } } - private void runTestForOneActiveShard(final ActiveShardCount activeShardCount) { + private void runTestForOneActiveShard(final ActiveShardCount waitForActiveShards) { final String indexName = "test-idx"; final int numberOfShards = randomIntBetween(1, 5); final int numberOfReplicas = randomIntBetween(4, 7); - assert activeShardCount == ActiveShardCount.ONE || activeShardCount == ActiveShardCount.DEFAULT; - final ActiveShardCount waitForActiveShards = activeShardCount; + assert waitForActiveShards == ActiveShardCount.ONE || waitForActiveShards == ActiveShardCount.DEFAULT; ClusterState clusterState = initializeWithNewIndex(indexName, numberOfShards, numberOfReplicas); assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); clusterState = startPrimaries(clusterState, indexName); @@ -180,7 +247,11 @@ private void runTestForOneActiveShard(final ActiveShardCount activeShardCount) { assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); } - private ClusterState initializeWithNewIndex(final String indexName, final int numShards, final int numReplicas) { + private ClusterState initializeWithNewIndex(String indexName, int numShards, int numReplicas) { + return initializeWithNewIndex(indexName, numShards, numReplicas, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); + } + + private ClusterState initializeWithNewIndex(String indexName, int numShards, int numReplicas, ShardRoutingRoleStrategy strategy) { // initial index creation and new routing table info final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) .settings(settings(Version.CURRENT).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())) @@ -188,9 +259,7 @@ private ClusterState initializeWithNewIndex(final String indexName, final int nu .numberOfReplicas(numReplicas) .build(); final Metadata metadata = Metadata.builder().put(indexMetadata, true).build(); - final RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) - .addAsNew(indexMetadata) - .build(); + final RoutingTable routingTable = RoutingTable.builder(strategy).addAsNew(indexMetadata).build(); return ClusterState.builder(new ClusterName("test_cluster")).metadata(metadata).routingTable(routingTable).build(); } From a8706293e618eecef3041e9fa760c07f10e4d976 Mon Sep 17 00:00:00 2001 From: Michael Bischoff Date: Wed, 18 Jan 2023 11:37:40 +0100 Subject: [PATCH 02/13] Pipeline setting missing in reindex.asciidoc (#89125) * Update reindex.asciidoc * Update docs/reference/docs/reindex.asciidoc Co-authored-by: Abdon Pijpelink --- docs/reference/docs/reindex.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 21df11cf2c156..5f8b1fcc7f0c3 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -575,6 +575,9 @@ Valid values: `index`, `create`. Defaults to `index`. IMPORTANT: To reindex to a data stream destination, this argument must be `create`. +`pipeline`::: +(Optional, string) the name of the <> to use. 
+ `script`:: `source`::: (Optional, string) The script to run to update the document source or metadata when reindexing. From 6ec31ed1533e6aaf8fc6dabda5e419f5dce984fa Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 18 Jan 2023 10:46:28 +0000 Subject: [PATCH 03/13] Reduce DEBUG log noise from LocalHealthMonitor (#92933) There's no need to log `DEBUG` messages on every cluster state update here. Dropping ones that happen every time to `TRACE`. Also gives `HealthMetadata` a proper `toString()` implementation since it appears in the output of `ClusterState#toString` --- .../org/elasticsearch/health/metadata/HealthMetadata.java | 6 ++++++ .../org/elasticsearch/health/node/LocalHealthMonitor.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 9c69792914247..b5d93f74f0efa 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -96,6 +97,11 @@ public int hashCode() { return Objects.hash(diskMetadata); } + @Override + public String toString() { + return "HealthMetadata{diskMetadata=" + Strings.toString(diskMetadata) + '}'; + } + /** * Contains the thresholds necessary to determine the health of the disk space of a node. The thresholds are determined by the elected * master. diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index 50146324235a6..9d034bb3c249b 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -160,7 +160,7 @@ private void startMonitoringIfNecessary() { ); logger.debug("Local health monitoring started {}", monitoring); } else { - logger.debug("Local health monitoring already started {}, skipping", monitoring); + logger.trace("Local health monitoring already started {}, skipping", monitoring); } } } From e22e0242e16d5df53ebded96b20252e7abda85c3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 18 Jan 2023 10:48:48 +0000 Subject: [PATCH 04/13] Introduce RefCountingListener (#92995) Like `RefCountingRunnable` but it keeps track of a (bounded) set of exceptions received by the acquired listeners too. 
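
As a sketch of the intended call pattern (all names other than
RefCountingListener and ActionListener themselves are hypothetical):

    ActionListener<Void> finalListener = ActionListener.wrap(
        ignored -> logger.info("all items processed"),
        e -> logger.warn("at least one item failed", e) // first failure; later ones are suppressed
    );
    try (var refs = new RefCountingListener(5, finalListener)) { // retain at most 5 exceptions
        for (var item : items) {
            processAsync(item, refs.acquire()); // each acquired listener must be completed exactly once
        }
    }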
--- .../action/support/RefCountingListener.java | 177 +++++++++++++++ .../action/support/RefCountingRunnable.java | 11 +- .../blobstore/BlobStoreRepository.java | 91 ++++---- .../support/RefCountingListenerTests.java | 207 ++++++++++++++++++ .../blobcache/common/SparseFileTracker.java | 62 ++---- 5 files changed, 458 insertions(+), 90 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java create mode 100644 server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java new file mode 100644 index 0000000000000..7fe6e7df1b407 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java @@ -0,0 +1,177 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Releasable; + +import java.util.Objects; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +/** + * A mechanism to complete a listener on the completion of some (dynamic) collection of other actions. Basic usage is as follows: + * + *
+ * try (var refs = new RefCountingListener(finalListener)) {
+ *     for (var item : collection) {
+ *         runAsyncAction(item, refs.acquire()); // completes the acquired listener on completion
+ *     }
+ * }
+ * </pre>
+ *
+ * The delegate listener is completed when execution leaves the try-with-resources block and every acquired reference is released. The
+ * {@link RefCountingListener} collects (a bounded number of) exceptions received by its subsidiary listeners, and completes the delegate
+ * listener with an exception if (and only if) any subsidiary listener fails. However, unlike a {@link GroupedActionListener} it leaves it
+ * to the caller to collect the results of successful completions by accumulating them in a data structure of its choice. Also unlike a
+ * {@link GroupedActionListener} there is no need to declare the number of subsidiary listeners up front: listeners can be acquired
+ * dynamically as needed. Finally, you can continue to acquire additional listeners even outside the try-with-resources block, perhaps in a
+ * separate thread, as long as there's at least one listener outstanding:
+ *
+ * <pre>
+ * try (var refs = new RefCountingListener(finalListener)) {
+ *     for (var item : collection) {
+ *         if (condition(item)) {
+ *             runAsyncAction(item, refs.acquire().map(results::add));
+ *         }
+ *     }
+ *     if (flag) {
+ *         runOneOffAsyncAction(refs.acquire().map(results::add));
+ *         return;
+ *     }
+ *     for (var item : otherCollection) {
+ *         var itemRef = refs.acquire(); // delays completion while the background action is pending
+ *         executorService.execute(() -> {
+ *             try {
+ *                 if (condition(item)) {
+ *                     runOtherAsyncAction(item, refs.acquire().map(results::add));
+ *                 }
+ *             } finally {
+ *                 itemRef.onResponse(null);
+ *             }
+ *         });
+ *     }
+ * }
+ * 
+ * </pre>
+ *
+ * In particular (and also unlike a {@link GroupedActionListener}) this works even if you don't acquire any extra refs at all: in that case,
+ * the delegate listener is completed at the end of the try-with-resources block.
+ */
+public final class RefCountingListener implements Releasable {
+
+    private final ActionListener<Void> delegate;
+    private final RefCountingRunnable refs = new RefCountingRunnable(this::finish);
+
+    private final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
+    private final Semaphore exceptionPermits;
+    private final AtomicInteger droppedExceptionsRef = new AtomicInteger();
+
+    /**
+     * Construct a {@link RefCountingListener} which completes {@code delegate} when all refs are released.
+     * @param delegate The listener to complete when all refs are released. This listener must not throw any exception on completion. If all
+     *                 the acquired listeners completed successfully then so is the delegate. If any of the acquired listeners completed
+     *                 with failure then the delegate is completed with the first exception received, with other exceptions added to its
+     *                 collection of suppressed exceptions.
+     */
+    public RefCountingListener(ActionListener<Void> delegate) {
+        this(10, delegate);
+    }
+
+    /**
+     * Construct a {@link RefCountingListener} which completes {@code delegate} when all refs are released.
+     * @param delegate The listener to complete when all refs are released. This listener must not throw any exception on completion. If all
+     *                 the acquired listeners completed successfully then so is the delegate. If any of the acquired listeners completed
+     *                 with failure then the delegate is completed with the first exception received, with other exceptions added to its
+     *                 collection of suppressed exceptions.
+     * @param maxExceptions The maximum number of exceptions to accumulate on failure.
+     */
+    public RefCountingListener(int maxExceptions, ActionListener<Void> delegate) {
+        if (maxExceptions <= 0) {
+            assert false : maxExceptions;
+            throw new IllegalArgumentException("maxExceptions must be positive");
+        }
+        this.delegate = Objects.requireNonNull(delegate);
+        this.exceptionPermits = new Semaphore(maxExceptions);
+    }
+
+    /**
+     * Release the original reference to this object, which completes the delegate {@link ActionListener} if there are no other references.
+     *
+     * It is invalid to call this method more than once. Doing so will trip an assertion if assertions are enabled, but will be ignored
+     * otherwise. This deviates from the contract of {@link java.io.Closeable}.
+     */
+    @Override
+    public void close() {
+        refs.close();
+    }
+
+    private void finish() {
+        try {
+            var exception = exceptionRef.get();
+            if (exception == null) {
+                delegate.onResponse(null);
+            } else {
+                final var droppedExceptions = droppedExceptionsRef.getAndSet(0);
+                if (droppedExceptions > 0) {
+                    exception.addSuppressed(new ElasticsearchException(droppedExceptions + " further exceptions were dropped"));
+                }
+                delegate.onFailure(exception);
+            }
+        } catch (Exception e) {
+            assert false : e;
+            throw e;
+        }
+    }
+
+    /**
+     * Acquire a reference to this object and return a listener which releases it. The delegate {@link ActionListener} is called when all
+     * its references have been released.
+     *
+     * It is invalid to call this method once all references are released. Doing so will trip an assertion if assertions are enabled, and
+     * will throw an {@link IllegalStateException} otherwise.
+     *
+     * It is also invalid to complete the returned listener more than once. Doing so will trip an assertion if assertions are enabled, but
+     * will be ignored otherwise.
+     */
+    public <T> ActionListener<T> acquire() {
+        return new ActionListener<>() {
+            private final Releasable ref = refs.acquire();
+
+            @Override
+            public void onResponse(T unused) {
+                ref.close();
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                if (exceptionPermits.tryAcquire()) {
+                    final var firstException = exceptionRef.compareAndExchange(null, e);
+                    if (firstException != null && firstException != e) {
+                        firstException.addSuppressed(e);
+                    }
+                } else {
+                    droppedExceptionsRef.incrementAndGet();
+                }
+                ref.close();
+            }
+
+            @Override
+            public String toString() {
+                return RefCountingListener.this.toString();
+            }
+        };
+    }
+
+    @Override
+    public String toString() {
+        return "refCounting[" + delegate + "]";
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java
index 0fea6dbeb1ad8..c3a5bb3989722 100644
--- a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java
+++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java
@@ -32,7 +32,7 @@
  * The delegate action is completed when execution leaves the try-with-resources block and every acquired reference is released. Unlike a
  * {@link CountDown} there is no need to declare the number of subsidiary actions up front (refs can be acquired dynamically as needed) nor
  * does the caller need to check for completion each time a reference is released. Moreover even outside the try-with-resources block you
- * can continue to acquire additional listeners, even in a separate thread, as long as there's at least one listener outstanding:
+ * can continue to acquire additional references, even in a separate thread, as long as there's at least one reference outstanding:
  *
  * <pre>
  * try (var refs = new RefCountingRunnable(finalRunnable)) {
@@ -95,7 +95,11 @@ public RefCountingRunnable(Runnable delegate) {
      * Acquire a reference to this object and return an action which releases it. The delegate {@link Runnable} is called when all its
      * references have been released.
      *
-     * Callers must take care to close the returned resource exactly once. This deviates from the contract of {@link java.io.Closeable}.
+     * It is invalid to call this method once all references are released. Doing so will trip an assertion if assertions are enabled, and
+     * will throw an {@link IllegalStateException} otherwise.
+     *
+     * It is also invalid to release the acquired resource more than once. Doing so will trip an assertion if assertions are enabled, but
+     * will be ignored otherwise. This deviates from the contract of {@link java.io.Closeable}.
      */
     public Releasable acquire() {
         if (refCounted.tryIncRef()) {
@@ -116,7 +120,8 @@ public ActionListener<Void> acquireListener() {
     /**
      * Release the original reference to this object, which executes the delegate {@link Runnable} if there are no other references.
      *
-     * Callers must take care to close this resource exactly once. This deviates from the contract of {@link java.io.Closeable}.
+     * It is invalid to call this method more than once. Doing so will trip an assertion if assertions are enabled, but will be ignored
+     * otherwise. This deviates from the contract of {@link java.io.Closeable}.
      */
     @Override
     public void close() {
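
To ground the revised acquire()/close() contract, here is a minimal usage sketch
(only RefCountingRunnable, Releasable and the acquire()/close() API shown in this
hunk are real; tasks, executor and logger are hypothetical):

    try (var refs = new RefCountingRunnable(() -> logger.info("all tasks done"))) {
        for (Runnable task : tasks) {
            Releasable ref = refs.acquire();
            executor.execute(() -> {
                try {
                    task.run();
                } finally {
                    ref.close(); // released exactly once, per the javadoc above
                }
            });
        }
    }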
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 8ba8f3acde601..9dea01238a02f 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -27,10 +27,10 @@
 import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.SingleResultDeduplicator;
 import org.elasticsearch.action.StepListener;
-import org.elasticsearch.action.support.CountDownActionListener;
 import org.elasticsearch.action.support.GroupedActionListener;
 import org.elasticsearch.action.support.ListenableActionFuture;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.support.RefCountingListener;
 import org.elasticsearch.action.support.RefCountingRunnable;
 import org.elasticsearch.action.support.ThreadedActionListener;
 import org.elasticsearch.cluster.ClusterState;
@@ -1422,7 +1422,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte
                 indexMetaIdentifiers = null;
             }
 
-            final ActionListener<Void> allMetaListener = new CountDownActionListener(2 + indices.size(), ActionListener.wrap(v -> {
+            try (var allMetaListeners = new RefCountingListener(ActionListener.wrap(v -> {
                 final String slmPolicy = slmPolicy(snapshotInfo);
                 final SnapshotDetails snapshotDetails = new SnapshotDetails(
                     snapshotInfo.state(),
@@ -1445,52 +1445,53 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte
                         }
                     }, onUpdateFailure)
                 );
-            }, onUpdateFailure));
-
-            // We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method will
-            // mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of the
-            // index or global metadata will be compatible with the segments written in this snapshot as well.
-            // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way
-            // that decrements the generation it points at
-            final Metadata clusterMetadata = finalizeSnapshotContext.clusterMetadata();
-            // Write Global MetaData
-            executor.execute(
-                ActionRunnable.run(
-                    allMetaListener,
-                    () -> GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress)
-                )
-            );
+            }, onUpdateFailure))) {
+
+                // We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method
+                // will mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of
+                // the index or global metadata will be compatible with the segments written in this snapshot as well.
+                // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way
+                // that decrements the generation it points at
+                final Metadata clusterMetadata = finalizeSnapshotContext.clusterMetadata();
+                // Write Global MetaData
+                executor.execute(
+                    ActionRunnable.run(
+                        allMetaListeners.acquire(),
+                        () -> GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress)
+                    )
+                );
 
-            // write the index metadata for each index in the snapshot
-            for (IndexId index : indices) {
-                executor.execute(ActionRunnable.run(allMetaListener, () -> {
-                    final IndexMetadata indexMetaData = clusterMetadata.index(index.getName());
-                    if (writeIndexGens) {
-                        final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData);
-                        String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers);
-                        if (metaUUID == null) {
-                            // We don't yet have this version of the metadata so we write it
-                            metaUUID = UUIDs.base64UUID();
-                            INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress);
-                            indexMetaIdentifiers.put(identifiers, metaUUID);
+                // write the index metadata for each index in the snapshot
+                for (IndexId index : indices) {
+                    executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> {
+                        final IndexMetadata indexMetaData = clusterMetadata.index(index.getName());
+                        if (writeIndexGens) {
+                            final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData);
+                            String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers);
+                            if (metaUUID == null) {
+                                // We don't yet have this version of the metadata so we write it
+                                metaUUID = UUIDs.base64UUID();
+                                INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress);
+                                indexMetaIdentifiers.put(identifiers, metaUUID);
+                            }
+                            indexMetas.put(index, identifiers);
+                        } else {
+                            INDEX_METADATA_FORMAT.write(
+                                clusterMetadata.index(index.getName()),
+                                indexContainer(index),
+                                snapshotId.getUUID(),
+                                compress
+                            );
                         }
-                        indexMetas.put(index, identifiers);
-                    } else {
-                        INDEX_METADATA_FORMAT.write(
-                            clusterMetadata.index(index.getName()),
-                            indexContainer(index),
-                            snapshotId.getUUID(),
-                            compress
-                        );
-                    }
-                }));
+                    }));
+                }
+                executor.execute(
+                    ActionRunnable.run(
+                        allMetaListeners.acquire(),
+                        () -> SNAPSHOT_FORMAT.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), compress)
+                    )
+                );
             }
-            executor.execute(
-                ActionRunnable.run(
-                    allMetaListener,
-                    () -> SNAPSHOT_FORMAT.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), compress)
-                )
-            );
         }, onUpdateFailure);
     }
 
diff --git a/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java b/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java
new file mode 100644
index 0000000000000..6b899748438a5
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.util.concurrent.EsExecutors.DIRECT_EXECUTOR_SERVICE;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class RefCountingListenerTests extends ESTestCase {
+
+    public void testBasicOperation() throws InterruptedException {
+        final var executed = new AtomicBoolean();
+        final var exceptionCount = new AtomicInteger();
+        final var threads = new Thread[between(0, 3)];
+        final var exceptionLimit = Math.max(1, between(0, threads.length));
+
+        boolean async = false;
+        final var startLatch = new CountDownLatch(1);
+
+        try (var refs = new RefCountingListener(exceptionLimit, new ActionListener<>() {
+            @Override
+            public void onResponse(Void unused) {
+                assertTrue(executed.compareAndSet(false, true));
+                assertEquals(0, exceptionCount.get());
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertTrue(executed.compareAndSet(false, true));
+                assertThat(exceptionCount.get(), greaterThan(0));
+                Throwable[] suppressed = e.getSuppressed();
+                if (exceptionCount.get() > exceptionLimit) {
+                    assertEquals(exceptionLimit, suppressed.length);
+                    for (int i = 0; i < suppressed.length; i++) {
+                        Throwable throwable = suppressed[i];
+                        if (i == suppressed.length - 1) {
+                            assertThat(
+                                throwable.getMessage(),
+                                equalTo((exceptionCount.get() - exceptionLimit) + " further exceptions were dropped")
+                            );
+                        } else {
+                            assertThat(throwable.getMessage(), equalTo("simulated"));
+                        }
+                    }
+                } else {
+                    assertEquals(exceptionCount.get() - 1, suppressed.length);
+                    for (Throwable throwable : suppressed) {
+                        assertThat(throwable.getMessage(), equalTo("simulated"));
+                    }
+                }
+            }
+
+            @Override
+            public String toString() {
+                return "test listener";
+            }
+        })) {
+            assertEquals("refCounting[test listener]", refs.toString());
+            var listener = refs.acquire();
+            assertThat(listener.toString(), containsString("refCounting[test listener]"));
+            listener.onResponse(null);
+
+            for (int i = 0; i < threads.length; i++) {
+                if (randomBoolean()) {
+                    async = true;
+                    var ref = refs.acquire();
+                    threads[i] = new Thread(() -> {
+                        try {
+                            assertTrue(startLatch.await(10, TimeUnit.SECONDS));
+                        } catch (InterruptedException e) {
+                            throw new AssertionError(e);
+                        }
+                        assertFalse(executed.get());
+                        if (randomBoolean()) {
+                            ref.onResponse(null);
+                        } else {
+                            exceptionCount.incrementAndGet();
+                            ref.onFailure(new ElasticsearchException("simulated"));
+                        }
+                    });
+                }
+            }
+
+            assertFalse(executed.get());
+        }
+
+        assertNotEquals(async, executed.get());
+
+        for (Thread thread : threads) {
+            if (thread != null) {
+                thread.start();
+            }
+        }
+
+        startLatch.countDown();
+
+        for (Thread thread : threads) {
+            if (thread != null) {
+                thread.join();
+            }
+        }
+
+        assertTrue(executed.get());
+    }
+
+    @SuppressWarnings("resource")
+    public void testNullCheck() {
+        expectThrows(NullPointerException.class, () -> new RefCountingListener(between(1, 10), null));
+    }
+
+    public void testValidation() {
+        final var callCount = new AtomicInteger();
+        final var refs = new RefCountingListener(Integer.MAX_VALUE, ActionListener.wrap(callCount::incrementAndGet));
+        refs.close();
+        assertEquals(1, callCount.get());
+
+        for (int i = between(1, 5); i > 0; i--) {
+            final ThrowingRunnable throwingRunnable;
+            final String expectedMessage;
+            if (randomBoolean()) {
+                throwingRunnable = refs::acquire;
+                expectedMessage = RefCountingRunnable.ALREADY_CLOSED_MESSAGE;
+            } else {
+                throwingRunnable = refs::close;
+                expectedMessage = "already closed";
+            }
+
+            assertEquals(expectedMessage, expectThrows(AssertionError.class, throwingRunnable).getMessage());
+            assertEquals(1, callCount.get());
+        }
+    }
+
+    public void testJavaDocExample() {
+        final var flag = new AtomicBoolean();
+        runExample(ActionListener.wrap(() -> assertTrue(flag.compareAndSet(false, true))));
+        assertTrue(flag.get());
+    }
+
+    private void runExample(ActionListener<Void> finalListener) {
+        final var collection = randomList(10, Object::new);
+        final var otherCollection = randomList(10, Object::new);
+        final var flag = randomBoolean();
+        @SuppressWarnings("UnnecessaryLocalVariable")
+        final var executorService = DIRECT_EXECUTOR_SERVICE;
+        final var results = new ArrayList<>();
+
+        try (var refs = new RefCountingListener(finalListener)) {
+            for (var item : collection) {
+                if (condition(item)) {
+                    runAsyncAction(item, refs.acquire().map(results::add));
+                }
+            }
+            if (flag) {
+                runOneOffAsyncAction(refs.acquire().map(results::add));
+                return;
+            }
+            for (var item : otherCollection) {
+                var itemRef = refs.acquire(); // delays completion while the background action is pending
+                executorService.execute(() -> {
+                    try {
+                        if (condition(item)) {
+                            runOtherAsyncAction(item, refs.acquire().map(results::add));
+                        }
+                    } finally {
+                        itemRef.onResponse(null);
+                    }
+                });
+            }
+        }
+    }
+
+    @SuppressWarnings("unused")
+    private boolean condition(Object item) {
+        return randomBoolean();
+    }
+
+    @SuppressWarnings("unused")
+    private void runAsyncAction(Object item, ActionListener<Void> listener) {
+        listener.onResponse(null);
+    }
+
+    @SuppressWarnings("unused")
+    private void runOtherAsyncAction(Object item, ActionListener<Void> listener) {
+        listener.onResponse(null);
+    }
+
+    private void runOneOffAsyncAction(ActionListener<Void> listener) {
+        listener.onResponse(null);
+    }
+}
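
A worked example of the suppressed-exception accounting asserted in
testBasicOperation, with illustrative numbers: suppose exceptionLimit == 2 and
five acquired listeners fail with "simulated". Failure #1 takes a permit and
becomes the primary exception; failure #2 takes the last permit and is
addSuppressed() onto it; failures #3..#5 only increment the dropped counter.
finish() then suppresses one synthetic ElasticsearchException, so the delegate
sees:

    e.getMessage()                    -> "simulated"
    e.getSuppressed().length          -> 2   // == exceptionLimit
    e.getSuppressed()[1].getMessage() -> "3 further exceptions were dropped"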
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java
index 8acabd1a8c204..73d2ed5efd41d 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java
@@ -9,7 +9,7 @@
 
 import org.elasticsearch.Assertions;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.support.GroupedActionListener;
+import org.elasticsearch.action.support.RefCountingListener;
 import org.elasticsearch.core.Nullable;
 
 import java.util.ArrayList;
@@ -243,30 +243,7 @@ public List<Gap> waitForRange(final ByteRange range, final ByteRange subRange, f
                     .collect(Collectors.toList());
         }
 
-        // NB we work with ranges outside the mutex here, but only to interact with their completion listeners which are `final` so
-        // there is no risk of concurrent modification.
-
-        switch (requiredRanges.size()) {
-            case 0 ->
-                // no need to wait for the gaps to be filled, the listener can be executed immediately
-                wrappedListener.onResponse(null);
-            case 1 -> {
-                final Range requiredRange = requiredRanges.get(0);
-                requiredRange.completionListener.addListener(
-                    wrappedListener.map(progress -> null),
-                    Math.min(requiredRange.completionListener.end, subRange.end())
-                );
-            }
-            default -> {
-                final GroupedActionListener<Long> groupedActionListener = new GroupedActionListener<>(
-                    requiredRanges.size(),
-                    wrappedListener.map(progress -> null)
-                );
-                requiredRanges.forEach(
-                    r -> r.completionListener.addListener(groupedActionListener, Math.min(r.completionListener.end, subRange.end()))
-                );
-            }
-        }
+        subscribeToCompletionListeners(requiredRanges, subRange.end(), wrappedListener);
 
         return Collections.unmodifiableList(gaps);
     }
@@ -332,31 +309,32 @@ public boolean waitForRangeIfPending(final ByteRange range, final ActionListener<Void>
             assert invariant();
         }
 
+        subscribeToCompletionListeners(pendingRanges, range.end(), wrappedListener);
+        return true;
+    }
+
+    private void subscribeToCompletionListeners(List<Range> requiredRanges, long rangeEnd, ActionListener<Void> listener) {
         // NB we work with ranges outside the mutex here, but only to interact with their completion listeners which are `final` so
         // there is no risk of concurrent modification.
-
-        switch (pendingRanges.size()) {
-            case 0 -> wrappedListener.onResponse(null);
+        switch (requiredRanges.size()) {
+            case 0 ->
+                // no need to wait for the gaps to be filled, the listener can be executed immediately
+                listener.onResponse(null);
             case 1 -> {
-                final Range pendingRange = pendingRanges.get(0);
-                pendingRange.completionListener.addListener(
-                    wrappedListener.map(progress -> null),
-                    Math.min(pendingRange.completionListener.end, range.end())
+                final Range requiredRange = requiredRanges.get(0);
+                requiredRange.completionListener.addListener(
+                    listener.map(progress -> null),
+                    Math.min(requiredRange.completionListener.end, rangeEnd)
                 );
-                return true;
             }
             default -> {
-                final GroupedActionListener<Long> groupedActionListener = new GroupedActionListener<>(
-                    pendingRanges.size(),
-                    wrappedListener.map(progress -> null)
-                );
-                pendingRanges.forEach(
-                    r -> r.completionListener.addListener(groupedActionListener, Math.min(r.completionListener.end, range.end()))
-                );
-                return true;
+                try (var listeners = new RefCountingListener(listener)) {
+                    for (Range range : requiredRanges) {
+                        range.completionListener.addListener(listeners.acquire(), Math.min(range.completionListener.end, rangeEnd));
+                    }
+                }
             }
         }
-        return true;
     }
 
     private ActionListener<Void> wrapWithAssertions(ActionListener<Void> listener) {

From c9e8101d8285005a9841c23ccc1b538c749d293e Mon Sep 17 00:00:00 2001
From: William Brafford 
Date: Wed, 18 Jan 2023 06:48:48 -0500
Subject: [PATCH 05/13] Validate that stable plugins do not break compatibility
 (#92776)

We need to verify, for each release, that our stable plugin APIs
do not break.

This commit adds some Gradle support for basic backwards compatibility
testing. On the Gradle side, we add a new qa project to test the
current commit against downloads of released versions, and against
fresh builds of snapshot versions.

As for the actual comparison, we break up the output of javap (the
class file disassembler) by line and create maps of classes to public class,
field, and method declarations within those class files. We then
check that the signature map from the new jar is not missing any
elements present in the old jar. This method has known limitations,
which are documented in the JarApiComparisonTask class.

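For illustration, javap output for a class in one of these jars looks roughly
like this (formatting varies by JDK; the class chosen here is just an example):

    Compiled from "Logger.java"
    public interface org.elasticsearch.logging.Logger {
      public abstract void info(java.lang.String);
    }

Only the lines matching ^\s*public.* are retained as the "signature", and the
comparison fails if any such line from the old jar is missing from the new one.
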
Co-authored-by: Mark Vieira 
---
 .../groovy/elasticsearch.stable-api.gradle    |  50 ++++
 .../InternalDistributionBwcSetupPlugin.java   |  41 +++-
 .../gradle/internal/JarApiComparisonTask.java | 224 ++++++++++++++++++
 libs/logging/build.gradle                     |   1 -
 qa/stable-api/build.gradle                    |   6 +
 qa/stable-api/logging/build.gradle            |   4 +
 .../plugin-analysis-api/build.gradle          |   3 +
 qa/stable-api/plugin-api/build.gradle         |   3 +
 settings.gradle                               |   4 +
 9 files changed, 334 insertions(+), 2 deletions(-)
 create mode 100644 build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle
 create mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java
 create mode 100644 qa/stable-api/build.gradle
 create mode 100644 qa/stable-api/logging/build.gradle
 create mode 100644 qa/stable-api/plugin-analysis-api/build.gradle
 create mode 100644 qa/stable-api/plugin-api/build.gradle

diff --git a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle
new file mode 100644
index 0000000000000..c52bd9d1d52c7
--- /dev/null
+++ b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle
@@ -0,0 +1,50 @@
+import org.apache.tools.ant.taskdefs.condition.Os
+import org.elasticsearch.gradle.Version
+import org.elasticsearch.gradle.VersionProperties
+import org.elasticsearch.gradle.internal.BwcVersions
+import org.elasticsearch.gradle.internal.JarApiComparisonTask
+import org.elasticsearch.gradle.internal.info.BuildParams
+
+import static org.elasticsearch.gradle.internal.InternalDistributionBwcSetupPlugin.buildBwcTaskName
+
+configurations {
+  newJar
+}
+
+dependencies {
+  newJar project(":libs:${project.name}")
+}
+
+BuildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince))
+  && it != VersionProperties.elasticsearchVersion
+}) { bwcVersion, baseName ->
+
+  BwcVersions.UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion)
+
+  configurations {
+    "oldJar${baseName}" {
+      transitive = false
+    }
+  }
+
+  dependencies {
+    if (unreleasedVersion) {
+      // For unreleased snapshot versions, build them from source
+      "oldJar${baseName}"(files(project(unreleasedVersion.gradleProjectPath).tasks.named(buildBwcTaskName(project.name))))
+    } else {
+      // For released versions, download it
+      "oldJar${baseName}"("org.elasticsearch:${project.name}:${bwcVersion}")
+    }
+  }
+
+  def jarApiComparisonTask = tasks.register(bwcTaskName(bwcVersion), JarApiComparisonTask) {
+    oldJar = configurations."oldJar${baseName}"
+    newJar = configurations.newJar
+  }
+
+  jarApiComparisonTask.configure {
+    onlyIf {
+      !Os.isFamily(Os.FAMILY_WINDOWS)
+    }
+  }
+}
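
Note that this script assumes each participating libs project defines
ext.stableApiSince (the first version at which its API was declared stable).
The qa/stable-api build files created in this patch are not shown here, but a
subproject would be expected to set something like stableApiSince = "8.7.0"
(illustrative value) before this logic runs.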
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
index 1f26b8e31ebcf..a32358c6db4f6 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
@@ -27,6 +27,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import javax.inject.Inject;
@@ -120,6 +121,35 @@ private void configureBwcProject(Project project, BwcVersions.UnreleasedVersionI
             buildBwcTaskProvider,
             "assemble"
         );
+
+        // for versions before 8.7.0, we do not need to set up stable API bwc
+        if (bwcVersion.get().before(Version.fromString("8.7.0"))) {
+            return;
+        }
+
+        for (Project stableApiProject : resolveStableProjects(project)) {
+
+            String relativeDir = project.getRootProject().relativePath(stableApiProject.getProjectDir());
+
+            DistributionProjectArtifact stableAnalysisPluginProjectArtifact = new DistributionProjectArtifact(
+                new File(
+                    checkoutDir.get(),
+                    relativeDir + "/build/distributions/" + stableApiProject.getName() + "-" + bwcVersion.get() + "-SNAPSHOT.jar"
+                ),
+                null
+            );
+
+            createBuildBwcTask(
+                bwcSetupExtension,
+                project,
+                bwcVersion,
+                stableApiProject.getName(),
+                "libs/" + stableApiProject.getName(),
+                stableAnalysisPluginProjectArtifact,
+                buildBwcTaskProvider,
+                "assemble"
+            );
+        }
     }
 
     private void registerBwcDistributionArtifacts(Project bwcProject, DistributionProject distributionProject) {
@@ -209,7 +239,16 @@ private static List<DistributionProject> resolveArchiveProjects(File checkoutDir
         }).collect(Collectors.toList());
     }
 
-    private static String buildBwcTaskName(String projectName) {
+    private static List<Project> resolveStableProjects(Project project) {
+        Set<String> stableProjectNames = Set.of("elasticsearch-logging", "elasticsearch-plugin-api", "elasticsearch-plugin-analysis-api");
+        return project.findProject(":libs")
+            .getSubprojects()
+            .stream()
+            .filter(subproject -> stableProjectNames.contains(subproject.getName()))
+            .toList();
+    }
+
+    public static String buildBwcTaskName(String projectName) {
         return "buildBwc"
             + stream(projectName.split("-")).map(i -> i.substring(0, 1).toUpperCase(Locale.ROOT) + i.substring(1))
                 .collect(Collectors.joining());
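
As a worked example of the task-name mangling above:
buildBwcTaskName("plugin-analysis-api") splits the project name on "-",
capitalizes each segment, and joins them, yielding "buildBwcPluginAnalysisApi".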
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java
new file mode 100644
index 0000000000000..b1da632d84cd6
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java
@@ -0,0 +1,224 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal;
+
+import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTask;
+import org.gradle.api.file.FileCollection;
+import org.gradle.api.provider.Property;
+import org.gradle.api.tasks.CacheableTask;
+import org.gradle.api.tasks.CompileClasspath;
+import org.gradle.api.tasks.TaskAction;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.jar.JarFile;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.zip.ZipEntry;
+
+/**
+ * This implementation of a jar API comparison uses the "javap" tool to compare
+ * the "signatures" of two different jars. We assume that calling out to javap
+ * is not too expensive at this stage of the stable API project. We also assume
+ * that for every public class, method, and field, javap will print a consistent
+ * single line. This should let us make string comparisons, rather than having
+ * to parse the output of javap.
+ * <p>
+ * While the above assumptions appear to hold, they are not guaranteed, and hence
+ * brittle. We could overcome these problems with an ASM implementation of the
+ * Jar Scanner.
+ * <p>
+ * We also assume that we will not be comparing multi-version JARs.
+ * <p>
+ * This "javap" approach has a few further drawbacks:
+ * <ol>
+ * <li>We don't account for class visibility when examining fields and methods.</li>
+ * <li>We don't consider what is exported from the module. Is a public method from
+ * a non-exported package considered part of the stable api?</li>
+ * <li>Changing method types to their superclass or return types to an implementation
+ * class will be considered a change by this approach, even though that doesn't break
+ * an API.</li>
+ * <li>Finally, moving a method up the class hierarchy is not really a breaking change,
+ * but it will trip this test.</li>
+ * </ol>
+ */
+@CacheableTask
+public abstract class JarApiComparisonTask extends PrecommitTask {
+
+    @TaskAction
+    public void compare() {
+        FileCollection fileCollection = getOldJar().get();
+        File newJarFile = getNewJar().get().getSingleFile();
+
+        Set<String> oldJarNames = fileCollection.getFiles().stream().map(File::getName).collect(Collectors.toSet());
+        if (oldJarNames.size() > 1) {
+            throw new IllegalStateException("Expected a single original jar, but found: " + oldJarNames);
+        }
+        if (oldJarNames.contains(newJarFile.getName())) {
+            throw new IllegalStateException(
+                "We should be comparing different jars, but original and new jars were both: " + newJarFile.getAbsolutePath()
+            );
+        }
+
+        JarScanner oldJS = new JarScanner(getOldJar().get().getSingleFile().getPath());
+        JarScanner newJS = new JarScanner(newJarFile.getPath());
+        try {
+            JarScanner.compareSignatures(oldJS.jarSignature(), newJS.jarSignature());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @CompileClasspath
+    public abstract Property<FileCollection> getOldJar();
+
+    @CompileClasspath
+    public abstract Property<FileCollection> getNewJar();
+
+    public static class JarScanner {
+
+        private final String path;
+
+        public JarScanner(String path) {
+            this.path = path;
+        }
+
+        private String getPath() {
+            return path;
+        }
+
+        /**
+         * Get a list of class names contained in this jar by looking for file names
+         * that end in ".class"
+         */
+        List<String> classNames() throws IOException {
+            Pattern classEnding = Pattern.compile(".*\\.class$");
+            try (JarFile jf = new JarFile(this.path)) {
+                return jf.stream().map(ZipEntry::getName).filter(classEnding.asMatchPredicate()).collect(Collectors.toList());
+            }
+        }
+
+        /**
+         * Given a path to a file in the jar, get the output of javap as a list of strings.
+         */
+        public List<String> disassembleFromJar(String fileInJarPath, String classpath) {
+            String location = "jar:file://" + getPath() + "!/" + fileInJarPath;
+            return disassemble(location, getPath(), classpath);
+        }
+
+        /**
+         * Invoke javap on a class file, optionally providing a module path or class path
+         */
+        static List<String> disassemble(String location, String modulePath, String classpath) {
+            ProcessBuilder pb = new ProcessBuilder();
+            List<String> command = new ArrayList<>();
+            command.add("javap");
+            if (modulePath != null) {
+                command.add("--module-path");
+                command.add(modulePath);
+            }
+            if (classpath != null) {
+                command.add("--class-path");
+                command.add(classpath);
+            }
+            command.add(location);
+            pb.command(command.toArray(new String[] {}));
+            Process p;
+            try {
+                p = pb.start();
+                p.onExit().get();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+
+            InputStream streamToRead = p.exitValue() == 0 ? p.getInputStream() : p.getErrorStream();
+
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(streamToRead))) {
+                return br.lines().toList();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        /**
+         * Given the output of the javap command, that is, the disassembled class file,
+         * return a set of signatures for all public classes, methods, and fields.
+         */
+        public static Set<String> signaturesSet(List<String> javapOutput) {
+            return javapOutput.stream().filter(s -> s.matches("^\\s*public.*")).collect(Collectors.toSet());
+        }
+
+        /**
+         * Given a disassembled module-info.class, return all unqualified exports.
+         */
+        public static Set<String> moduleInfoSignaturesSet(List<String> javapOutput) {
+            return javapOutput.stream()
+                .filter(s -> s.matches("^\\s*exports.*"))
+                .filter(s -> s.matches(".* to$") == false)
+                .collect(Collectors.toSet());
+        }
+
+        /**
+         * Iterate over classes and gather signatures.
+         */
+        public Map<String, Set<String>> jarSignature() throws IOException {
+            return this.classNames().stream().collect(Collectors.toMap(s -> s, s -> {
+                List<String> disassembled = disassembleFromJar(s, null);
+                if ("module-info.class".equals(s)) {
+                    return moduleInfoSignaturesSet(disassembled);
+                }
+                return signaturesSet(disassembled);
+            }));
+        }
+
+        /**
+         * Comparison: The signatures are maps of class names to public class, field, or method
+         * declarations.
+         * <p>
+         * First, we check that the new jar signature contains all the same classes
+         * as the old jar signature. If not, we return an error.
+         * <p>
+         * Second, we iterate over the signature for each class. If a signature from the old
+         * jar is absent in the new jar, we add it to our list of errors.
+         * <p>
+         * Note that it is fine for the new jar to have additional elements, as this
+         * is backwards compatible.
+         */
+        public static void compareSignatures(Map<String, Set<String>> oldSignature, Map<String, Set<String>> newSignature) {
+            Set<String> deletedClasses = new HashSet<>(oldSignature.keySet());
+            deletedClasses.removeAll(newSignature.keySet());
+            if (deletedClasses.size() > 0) {
+                throw new IllegalStateException("Classes from a previous version not found: " + deletedClasses);
+            }
+
+            Map<String, Set<String>> deletedMembersMap = new HashMap<>();
+            for (Map.Entry<String, Set<String>> entry : oldSignature.entrySet()) {
+                Set<String> deletedMembers = new HashSet<>(entry.getValue());
+                deletedMembers.removeAll(newSignature.get(entry.getKey()));
+                if (deletedMembers.size() > 0) {
+                    deletedMembersMap.put(entry.getKey(), Set.copyOf(deletedMembers));
+                }
+            }
+            if (deletedMembersMap.size() > 0) {
+                throw new IllegalStateException(
+                    "Classes from a previous version have been modified, violating backwards compatibility: " + deletedMembersMap
+                );
+            }
+        }
+    }
+}
diff --git a/libs/logging/build.gradle b/libs/logging/build.gradle
index 3859c0b12ced6..3004af029cb55 100644
--- a/libs/logging/build.gradle
+++ b/libs/logging/build.gradle
@@ -9,7 +9,6 @@
 
 apply plugin: 'elasticsearch.publish'
 apply plugin: 'elasticsearch.build'
-
 tasks.named("loggerUsageCheck").configure {enabled = false }
 
 dependencies {
diff --git a/qa/stable-api/build.gradle b/qa/stable-api/build.gradle
new file mode 100644
index 0000000000000..393c271967c6a
--- /dev/null
+++ b/qa/stable-api/build.gradle
@@ -0,0 +1,6 @@
+subprojects {
+  apply plugin: 'elasticsearch.java'
+  apply plugin: 'elasticsearch.bwc-test'
+
+  group = 'org.elasticsearch.qa.stable-api'
+}
diff --git a/qa/stable-api/logging/build.gradle b/qa/stable-api/logging/build.gradle
new file mode 100644
index 0000000000000..ada0dc1d169b2
--- /dev/null
+++ b/qa/stable-api/logging/build.gradle
@@ -0,0 +1,4 @@
+ext.stableApiSince = "8.7.0"
+
+apply plugin: 'elasticsearch.stable-api'
+
diff --git a/qa/stable-api/plugin-analysis-api/build.gradle b/qa/stable-api/plugin-analysis-api/build.gradle
new file mode 100644
index 0000000000000..c3fdc92c36bbb
--- /dev/null
+++ b/qa/stable-api/plugin-analysis-api/build.gradle
@@ -0,0 +1,3 @@
+ext.stableApiSince = "8.7.0"
+
+apply plugin: 'elasticsearch.stable-api'
diff --git a/qa/stable-api/plugin-api/build.gradle b/qa/stable-api/plugin-api/build.gradle
new file mode 100644
index 0000000000000..c3fdc92c36bbb
--- /dev/null
+++ b/qa/stable-api/plugin-api/build.gradle
@@ -0,0 +1,3 @@
+ext.stableApiSince = "8.7.0"
+
+apply plugin: 'elasticsearch.stable-api'
diff --git a/settings.gradle b/settings.gradle
index a157ba4784353..d98f65042e447 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -136,6 +136,10 @@ project(":libs").children.each { libsProject ->
   }
 }
 
+project(":qa:stable-api").children.each { libsProject ->
+  libsProject.name = "elasticsearch-${libsProject.name}"
+}
+
 project(":test:external-modules").children.each { testProject ->
   testProject.name = "test-${testProject.name}"
 }
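Before the next patch: a hedged illustration of the comparison contract documented above. Given toy signature maps (class file names mapped to javap lines), a public member that disappears from the new jar fails the check. The demo class and its sample data are invented; compareSignatures is the method added by this patch:

import java.util.Map;
import java.util.Set;

import org.elasticsearch.gradle.internal.JarApiComparisonTask.JarScanner;

public class CompareSignaturesDemo {
    public static void main(String[] args) {
        // Old jar: one class exposing two public members.
        Map<String, Set<String>> oldSignature = Map.of(
            "org/example/Foo.class", Set.of("public void a();", "public int b();")
        );
        // New jar: b() was removed, which breaks backwards compatibility;
        // the extra c() is fine, since additions are compatible.
        Map<String, Set<String>> newSignature = Map.of(
            "org/example/Foo.class", Set.of("public void a();", "public long c();")
        );
        try {
            JarScanner.compareSignatures(oldSignature, newSignature);
        } catch (IllegalStateException e) {
            // Prints: "Classes from a previous version have been modified, violating
            // backwards compatibility: {org/example/Foo.class=[public int b();]}"
            System.out.println(e.getMessage());
        }
    }
}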
From 510ab87a85ab9b1c10dbe04df19bdeee0f876056 Mon Sep 17 00:00:00 2001
From: Octavio Ranieri
Date: Wed, 18 Jan 2023 09:45:33 -0300
Subject: [PATCH 06/13] Add dynamic logging option (#90268)

Add dynamic logging option

Co-authored-by: Elastic Machine
---
 .../docs/en/security/troubleshooting.asciidoc | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc
index ed30fc6c14550..23748172c92de 100644
--- a/x-pack/docs/en/security/troubleshooting.asciidoc
+++ b/x-pack/docs/en/security/troubleshooting.asciidoc
@@ -107,7 +107,20 @@ The role definition might be missing or invalid.
 
 |======================
 
-To help track down these possibilities, add the following lines to the end of
+To help track down these possibilities, enable additional logging to troubleshoot further.
+You can enable debug logging by configuring the following persistent setting:
+
+[source, console]
+----
+PUT /_cluster/settings
+{
+  "persistent": {
+    "logger.org.elasticsearch.xpack.security.authc": "debug"
+  }
+}
+----
+
+Alternatively, you can add the following lines to the end of
 the `log4j2.properties` configuration file in the `ES_PATH_CONF`:
 
 [source,properties]
@@ -116,6 +129,9 @@
 logger.authc.name = org.elasticsearch.xpack.security.authc
 logger.authc.level = DEBUG
 ----------------
 
+Refer to <> for more
+information.
+
 A successful authentication should produce debug statements that list groups
 and role mappings.
--
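The docs change above gives the console command; the same persistent setting can also be applied from Java. A minimal sketch using the low-level REST client, assuming a locally reachable cluster (the host, port, and harness class are illustrative):

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class EnableAuthcDebugLogging {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_cluster/settings");
            // Same body as the console example in the documentation change above.
            request.setJsonEntity("""
                { "persistent": { "logger.org.elasticsearch.xpack.security.authc": "debug" } }""");
            client.performRequest(request);
        }
    }
}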
From eb1de9493e0fa24f65fbbacc2cf0fe46879e1981 Mon Sep 17 00:00:00 2001
From: Stef Nestor
Date: Wed, 18 Jan 2023 05:53:48 -0700
Subject: [PATCH 07/13] [+DOC] node_concurrent_recoveries default (#90330)

Notes that `node_concurrent_recoveries` default is 2 (same as both
sub-settings which already note that).
---
 docs/reference/modules/cluster/shards_allocation.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc
index 0c3f028a6c3a1..42282e00e81d4 100644
--- a/docs/reference/modules/cluster/shards_allocation.asciidoc
+++ b/docs/reference/modules/cluster/shards_allocation.asciidoc
@@ -35,7 +35,7 @@ one of the active allocation ids in the cluster state.
 
 `cluster.routing.allocation.node_concurrent_recoveries`::
     (<>) A shortcut to set both
     `cluster.routing.allocation.node_concurrent_incoming_recoveries` and
-    `cluster.routing.allocation.node_concurrent_outgoing_recoveries`.
+    `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. Defaults to 2.
 
 `cluster.routing.allocation.node_initial_primaries_recoveries`::

From 77fe1b81bfde81ca4f54772c919fcb307c89c34e Mon Sep 17 00:00:00 2001
From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com>
Date: Wed, 18 Jan 2023 13:34:18 +0000
Subject: [PATCH 08/13] Ensure plugin class scanner always closes its directory stream (#93027)

---
 .../elasticsearch/plugin/scanner/ClassReaders.java |  4 ++--
 .../plugin/scanner/ClassReadersTests.java          | 12 ++++--------
 2 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java
index ae87c022b0a88..05059fce06a59 100644
--- a/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java
+++ b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java
@@ -45,8 +45,8 @@ public static List<ClassReader> ofDirWithJars(String path) {
             return Collections.emptyList();
         }
         Path dir = Paths.get(path);
-        try {
-            return ofPaths(Files.list(dir));
+        try (var stream = Files.list(dir)) {
+            return ofPaths(stream);
         } catch (IOException e) {
             throw new UncheckedIOException(e);
         }
diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java
index d707a46cda58d..cf39f83fc4632 100644
--- a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java
+++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java
@@ -25,12 +25,8 @@
 
 public class ClassReadersTests extends ESTestCase {
 
-    private Path tmpDir() throws IOException {
-        return createTempDir();
-    }
-
     public void testModuleInfoIsNotReturnedAsAClassFromJar() throws IOException {
-        final Path tmp = tmpDir();
+        final Path tmp = createTempDir(getTestName());
         final Path dirWithJar = tmp.resolve("jars-dir");
         Files.createDirectories(dirWithJar);
         Path jar = dirWithJar.resolve("api.jar");
@@ -46,7 +42,7 @@ public void testModuleInfoIsNotReturnedAsAClassFromJar() throws IOException {
     }
 
     public void testTwoClassesInAStreamFromJar() throws IOException {
-        final Path tmp = tmpDir();
+        final Path tmp = createTempDir(getTestName());
         final Path dirWithJar = tmp.resolve("jars-dir");
         Files.createDirectories(dirWithJar);
         Path jar = dirWithJar.resolve("api.jar");
@@ -67,7 +63,7 @@ public class B {}
     }
 
     public void testStreamOfJarsAndIndividualClasses() throws IOException {
-        final Path tmp = tmpDir();
+        final Path tmp = createTempDir(getTestName());
         final Path dirWithJar = tmp.resolve("jars-dir");
         Files.createDirectories(dirWithJar);
 
@@ -104,7 +100,7 @@ public class E {}
     }
 
     public void testMultipleJarsInADir() throws IOException {
-        final Path tmp = tmpDir();
+        final Path tmp = createTempDir(getTestName());
         final Path dirWithJar = tmp.resolve("jars-dir");
         Files.createDirectories(dirWithJar);

From 441e77c8cf6594a35601369deba838ac3c575593 Mon Sep 17 00:00:00 2001
From: Przemyslaw Gomulka
Date: Wed, 18 Jan 2023 14:48:14 +0100
Subject: [PATCH 09/13] Patch jackson-core with locally modified class (#92984)

while jackson 2.14.2 with FasterXML/jackson-core#882 is still not released
we want to patch the jackson-core used by x-content with the modified
class that fixes the bug #92480

closes #92480
---
 libs/x-content/impl/build.gradle              |    2 +-
 .../impl/es-jackson-core/build.gradle         |   32 +
 .../licenses/jackson-LICENSE.txt              |    8 +
 .../licenses/jackson-NOTICE.txt               |
20 + .../core/filter/FilteringParserDelegate.java | 1059 +++++++++++++++++ .../AbstractXContentFilteringTestCase.java | 55 + 6 files changed, 1175 insertions(+), 1 deletion(-) create mode 100644 libs/x-content/impl/es-jackson-core/build.gradle create mode 100644 libs/x-content/impl/es-jackson-core/licenses/jackson-LICENSE.txt create mode 100644 libs/x-content/impl/es-jackson-core/licenses/jackson-NOTICE.txt create mode 100644 libs/x-content/impl/es-jackson-core/src/main/java/com/fasterxml/jackson/core/filter/FilteringParserDelegate.java diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index a0b3a96b5c107..a6985d9c2986c 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -15,7 +15,7 @@ String jacksonVersion = "2.14.1" dependencies { compileOnly project(':libs:elasticsearch-core') compileOnly project(':libs:elasticsearch-x-content') - implementation "com.fasterxml.jackson.core:jackson-core:${jacksonVersion}" + implementation project(path: ':libs:x-content:impl:es-jackson-core', configuration: 'shadow') implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${jacksonVersion}" implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${jacksonVersion}" implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${jacksonVersion}" diff --git a/libs/x-content/impl/es-jackson-core/build.gradle b/libs/x-content/impl/es-jackson-core/build.gradle new file mode 100644 index 0000000000000..7e9d8c56c0ad8 --- /dev/null +++ b/libs/x-content/impl/es-jackson-core/build.gradle @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'com.github.johnrengelman.shadow' + +String jacksonVersion = "2.14.1" + +dependencies { + implementation "com.fasterxml.jackson.core:jackson-core:${jacksonVersion}" +} + +['jarHell', 'thirdPartyAudit', 'forbiddenApisMain', 'splitPackagesAudit', 'checkstyleMain', 'licenseHeaders', 'spotlessJavaCheck'].each { + tasks.named(it).configure { + enabled = false + } +} + +tasks.named("dependencyLicenses").configure { + mapping from: /jackson-.*/, to: 'jackson' +} + +shadowJar { + manifest { + attributes 'Multi-Release' : 'true' + } +} diff --git a/libs/x-content/impl/es-jackson-core/licenses/jackson-LICENSE.txt b/libs/x-content/impl/es-jackson-core/licenses/jackson-LICENSE.txt new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/libs/x-content/impl/es-jackson-core/licenses/jackson-LICENSE.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. 
+ +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/libs/x-content/impl/es-jackson-core/licenses/jackson-NOTICE.txt b/libs/x-content/impl/es-jackson-core/licenses/jackson-NOTICE.txt new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/libs/x-content/impl/es-jackson-core/licenses/jackson-NOTICE.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/libs/x-content/impl/es-jackson-core/src/main/java/com/fasterxml/jackson/core/filter/FilteringParserDelegate.java b/libs/x-content/impl/es-jackson-core/src/main/java/com/fasterxml/jackson/core/filter/FilteringParserDelegate.java new file mode 100644 index 0000000000000..32e6b06b778d0 --- /dev/null +++ b/libs/x-content/impl/es-jackson-core/src/main/java/com/fasterxml/jackson/core/filter/FilteringParserDelegate.java @@ -0,0 +1,1059 @@ +package com.fasterxml.jackson.core.filter; + +import java.io.IOException; +import java.io.OutputStream; +import java.math.BigDecimal; +import java.math.BigInteger; + +import com.fasterxml.jackson.core.*; +import com.fasterxml.jackson.core.filter.TokenFilter.Inclusion; +import com.fasterxml.jackson.core.util.JsonParserDelegate; + +import static com.fasterxml.jackson.core.JsonTokenId.*; + +/** + * Specialized {@link JsonParserDelegate} that allows use of + * {@link TokenFilter} for outputting a subset of content that + * is visible to caller + * + * @since 2.6 + */ +public class FilteringParserDelegate extends JsonParserDelegate +{ + /* + /********************************************************** + /* Configuration + /********************************************************** + */ + + /** + * Object consulted to determine whether to write parts of content generator + * is asked to write or not. + */ + protected TokenFilter rootFilter; + + /** + * Flag that determines whether filtering will continue after the first + * match is indicated or not: if `false`, output is based on just the first + * full match (returning {@link TokenFilter#INCLUDE_ALL}) and no more + * checks are made; if `true` then filtering will be applied as necessary + * until end of content. + */ + protected boolean _allowMultipleMatches; + + /** + * Flag that determines whether path leading up to included content should + * also be automatically included or not. If `false`, no path inclusion is + * done and only explicitly included entries are output; if `true` then + * path from main level down to match is also included as necessary. 
+ */ + protected TokenFilter.Inclusion _inclusion; + + /* + /********************************************************** + /* State + /********************************************************** + */ + + /** + * Last token retrieved via {@link #nextToken}, if any. + * Null before the first call to nextToken(), + * as well as if token has been explicitly cleared + */ + protected JsonToken _currToken; + + /** + * Last cleared token, if any: that is, value that was in + * effect when {@link #clearCurrentToken} was called. + */ + protected JsonToken _lastClearedToken; + + /** + * During traversal this is the actual "open" parse tree, which sometimes + * is the same as {@link #_exposedContext}, and at other times is ahead + * of it. Note that this context is never null. + */ + protected TokenFilterContext _headContext; + + /** + * In cases where {@link #_headContext} is "ahead" of context exposed to + * caller, this context points to what is currently exposed to caller. + * When the two are in sync, this context reference will be null. + */ + protected TokenFilterContext _exposedContext; + + /** + * State that applies to the item within container, used where applicable. + * Specifically used to pass inclusion state between property name and + * property, and also used for array elements. + */ + protected TokenFilter _itemFilter; + + /** + * Number of tokens for which {@link TokenFilter#INCLUDE_ALL} + * has been returned. + */ + protected int _matchCount; + + /* + /********************************************************** + /* Construction, initialization + /********************************************************** + */ + + @Deprecated + public FilteringParserDelegate(JsonParser p, TokenFilter f, + boolean includePath, boolean allowMultipleMatches) + { + this(p, f, includePath ? Inclusion.INCLUDE_ALL_AND_PATH : Inclusion.ONLY_INCLUDE_ALL, allowMultipleMatches); + } + + /** + * @param p Parser to delegate calls to + * @param f Filter to use + * @param inclusion Definition of inclusion criteria + * @param allowMultipleMatches Whether to allow multiple matches + */ + public FilteringParserDelegate(JsonParser p, TokenFilter f, + TokenFilter.Inclusion inclusion, boolean allowMultipleMatches) + { + super(p); + rootFilter = f; + // and this is the currently active filter for root values + _itemFilter = f; + _headContext = TokenFilterContext.createRootContext(f); + _inclusion = inclusion; + _allowMultipleMatches = allowMultipleMatches; + } + + /* + /********************************************************** + /* Extended API + /********************************************************** + */ + + public TokenFilter getFilter() { return rootFilter; } + + /** + * Accessor for finding number of matches, where specific token and sub-tree + * starting (if structured type) are passed. + * + * @return Number of matches + */ + public int getMatchCount() { + return _matchCount; + } + + /* + /********************************************************** + /* Public API, token accessors + /********************************************************** + */ + + @Override public JsonToken getCurrentToken() { return _currToken; } + @Override public JsonToken currentToken() { return _currToken; } + + @Deprecated // since 2.12 + @Override public final int getCurrentTokenId() { + return currentTokenId(); + } + @Override public final int currentTokenId() { + final JsonToken t = _currToken; + return (t == null) ? 
JsonTokenId.ID_NO_TOKEN : t.id(); + } + + @Override public boolean hasCurrentToken() { return _currToken != null; } + @Override public boolean hasTokenId(int id) { + final JsonToken t = _currToken; + if (t == null) { + return (JsonTokenId.ID_NO_TOKEN == id); + } + return t.id() == id; + } + + @Override public final boolean hasToken(JsonToken t) { + return (_currToken == t); + } + + @Override public boolean isExpectedStartArrayToken() { return _currToken == JsonToken.START_ARRAY; } + @Override public boolean isExpectedStartObjectToken() { return _currToken == JsonToken.START_OBJECT; } + + @Override public JsonLocation getCurrentLocation() { return delegate.getCurrentLocation(); } + + @Override + public JsonStreamContext getParsingContext() { + return _filterContext(); + } + + // !!! TODO: Verify it works as expected: copied from standard JSON parser impl + @Override + public String getCurrentName() throws IOException { + JsonStreamContext ctxt = _filterContext(); + if (_currToken == JsonToken.START_OBJECT || _currToken == JsonToken.START_ARRAY) { + JsonStreamContext parent = ctxt.getParent(); + return (parent == null) ? null : parent.getCurrentName(); + } + return ctxt.getCurrentName(); + } + + // 2.13: IMPORTANT! Must override along with older getCurrentName() + @Override + public String currentName() throws IOException { + JsonStreamContext ctxt = _filterContext(); + if (_currToken == JsonToken.START_OBJECT || _currToken == JsonToken.START_ARRAY) { + JsonStreamContext parent = ctxt.getParent(); + return (parent == null) ? null : parent.getCurrentName(); + } + return ctxt.getCurrentName(); + } + + /* + /********************************************************** + /* Public API, token state overrides + /********************************************************** + */ + + @Override + public void clearCurrentToken() { + if (_currToken != null) { + _lastClearedToken = _currToken; + _currToken = null; + } + } + + @Override + public JsonToken getLastClearedToken() { return _lastClearedToken; } + + @Override + public void overrideCurrentName(String name) { + // 14-Apr-2015, tatu: Not sure whether this can be supported, and if so, + // what to do with it... Delegation won't work for sure, so let's for + // now throw an exception + throw new UnsupportedOperationException("Can not currently override name during filtering read"); + } + + /* + /********************************************************** + /* Public API, traversal + /********************************************************** + */ + + @Override + public JsonToken nextToken() throws IOException + { + // 23-May-2017, tatu: To be honest, code here is rather hairy and I don't like all + // conditionals; and it seems odd to return `null` but NOT considering input + // as closed... would love a rewrite to simplify/clear up logic here. + + // Check for _allowMultipleMatches - false and at least there is one token - which is _currToken + // check for no buffered context _exposedContext - null + // If all the conditions matches then check for scalar / non-scalar property + + if (!_allowMultipleMatches && (_currToken != null) && (_exposedContext == null)) { + // if scalar, and scalar not present in obj/array and _inclusion == ONLY_INCLUDE_ALL + // and INCLUDE_ALL matched once, return null + if (_currToken.isScalarValue() && !_headContext.isStartHandled() + && _inclusion == Inclusion.ONLY_INCLUDE_ALL + && (_itemFilter == TokenFilter.INCLUDE_ALL)) { + return (_currToken = null); + } + } + // Anything buffered? 
+ TokenFilterContext ctxt = _exposedContext; + + if (ctxt != null) { + while (true) { + JsonToken t = ctxt.nextTokenToRead(); + if (t != null) { + _currToken = t; + return t; + } + // all done with buffered stuff? + if (ctxt == _headContext) { + _exposedContext = null; + if (ctxt.inArray()) { + t = delegate.getCurrentToken(); + _currToken = t; + if (_currToken == JsonToken.END_ARRAY) { + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + } + return t; + } + + // 19-Jul-2021, tatu: [core#700]: following was commented out?! + // Almost! Most likely still have the current token; + // with the sole exception of FIELD_NAME + t = delegate.currentToken(); + if (t == JsonToken.END_OBJECT) { + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + } + if (t != JsonToken.FIELD_NAME) { + _currToken = t; + return t; + } + break; + } + // If not, traverse down the context chain + ctxt = _headContext.findChildOf(ctxt); + _exposedContext = ctxt; + if (ctxt == null) { // should never occur + throw _constructError("Unexpected problem: chain of filtered context broken"); + } + } + } + + // If not, need to read more. If we got any: + JsonToken t = delegate.nextToken(); + if (t == null) { + // no strict need to close, since we have no state here + _currToken = t; + return t; + } + + // otherwise... to include or not? + TokenFilter f; + + switch (t.id()) { + case ID_START_ARRAY: + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildArrayContext(f, true); + return (_currToken = t); + } + if (f == null) { // does this occur? + delegate.skipChildren(); + break; + } + // Otherwise still iffy, need to check + f = _headContext.checkValue(f); + if (f == null) { + delegate.skipChildren(); + break; + } + if (f != TokenFilter.INCLUDE_ALL) { + f = f.filterStartArray(); + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildArrayContext(f, true); + return (_currToken = t); + } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) { + // TODO don't count as match? + _headContext = _headContext.createChildArrayContext(f, true); + return (_currToken = t); + } + _headContext = _headContext.createChildArrayContext(f, false); + + // Also: only need buffering if parent path to be included + if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) { + t = _nextTokenWithBuffering(_headContext); + if (t != null) { + _currToken = t; + return t; + } + } + break; + + case ID_START_OBJECT: + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildObjectContext(f, true); + return (_currToken = t); + } + if (f == null) { // does this occur? + delegate.skipChildren(); + break; + } + // Otherwise still iffy, need to check + f = _headContext.checkValue(f); + if (f == null) { + delegate.skipChildren(); + break; + } + if (f != TokenFilter.INCLUDE_ALL) { + f = f.filterStartObject(); + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildObjectContext(f, true); + return (_currToken = t); + } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) { + // TODO don't count as match? 
+ _headContext = _headContext.createChildObjectContext(f, true); + return (_currToken = t); + } + _headContext = _headContext.createChildObjectContext(f, false); + // Also: only need buffering if parent path to be included + if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) { + t = _nextTokenWithBuffering(_headContext); + if (t != null) { + _currToken = t; + return t; + } + } + // note: inclusion of surrounding Object handled separately via + // FIELD_NAME + break; + + case ID_END_ARRAY: + case ID_END_OBJECT: + { + boolean returnEnd = _headContext.isStartHandled(); + f = _headContext.getFilter(); + if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { + f.filterFinishArray(); + } + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + if (returnEnd) { + return (_currToken = t); + } + } + break; + + case ID_FIELD_NAME: + { + final String name = delegate.getCurrentName(); + // note: this will also set 'needToHandleName' + f = _headContext.setFieldName(name); + if (f == TokenFilter.INCLUDE_ALL) { + _itemFilter = f; + return (_currToken = t); + } + if (f == null) { + delegate.nextToken(); + delegate.skipChildren(); + break; + } + f = f.includeProperty(name); + if (f == null) { + delegate.nextToken(); + delegate.skipChildren(); + break; + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + if (_verifyAllowedMatches()) { + if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) { + return (_currToken = t); + } + } else { + delegate.nextToken(); + delegate.skipChildren(); + } + } + if (_inclusion != Inclusion.ONLY_INCLUDE_ALL) { + t = _nextTokenWithBuffering(_headContext); + if (t != null) { + _currToken = t; + return t; + } + } + break; + } + + default: // scalar value + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + return (_currToken = t); + } + if (f != null) { + f = _headContext.checkValue(f); + if ((f == TokenFilter.INCLUDE_ALL) + || ((f != null) && f.includeValue(delegate))) { + if (_verifyAllowedMatches()) { + return (_currToken = t); + } + } + } + // Otherwise not included (leaves must be explicitly included) + break; + } + + // We get here if token was not yet found; offlined handling + return _nextToken2(); + } + + // Offlined handling for cases where there was no buffered token to + // return, and the token read next could not be returned as-is, + // at least not yet, but where we have not yet established that + // buffering is needed. + protected final JsonToken _nextToken2() throws IOException + { + main_loop: + while (true) { + JsonToken t = delegate.nextToken(); + if (t == null) { // is this even legal? + _currToken = t; + return t; + } + TokenFilter f; + + switch (t.id()) { + case ID_START_ARRAY: + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildArrayContext(f, true); + return (_currToken = t); + } + if (f == null) { // does this occur? 
+ delegate.skipChildren(); + continue main_loop; + } + // Otherwise still iffy, need to check + f = _headContext.checkValue(f); + if (f == null) { + delegate.skipChildren(); + continue main_loop; + } + if (f != TokenFilter.INCLUDE_ALL) { + f = f.filterStartArray(); + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildArrayContext(f, true); + return (_currToken = t); + } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) { + _headContext = _headContext.createChildArrayContext(f, true); + return (_currToken = t); + } + _headContext = _headContext.createChildArrayContext(f, false); + // but if we didn't figure it out yet, need to buffer possible events + if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) { + t = _nextTokenWithBuffering(_headContext); + if (t != null) { + _currToken = t; + return t; + } + } + continue main_loop; + + case ID_START_OBJECT: + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildObjectContext(f, true); + return (_currToken = t); + } + if (f == null) { // does this occur? + delegate.skipChildren(); + continue main_loop; + } + // Otherwise still iffy, need to check + f = _headContext.checkValue(f); + if (f == null) { + delegate.skipChildren(); + continue main_loop; + } + if (f != TokenFilter.INCLUDE_ALL) { + f = f.filterStartObject(); + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildObjectContext(f, true); + return (_currToken = t); + } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) { + _headContext = _headContext.createChildObjectContext(f, true); + return (_currToken = t); + } + _headContext = _headContext.createChildObjectContext(f, false); + if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) { + t = _nextTokenWithBuffering(_headContext); + if (t != null) { + _currToken = t; + return t; + } + } + continue main_loop; + + case ID_END_ARRAY: + { + boolean returnEnd = _headContext.isStartHandled(); + f = _headContext.getFilter(); + if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { + boolean includeEmpty = f.includeEmptyArray(_headContext.hasCurrentIndex()); + f.filterFinishArray(); + if (includeEmpty) { + return _nextBuffered(_headContext); + } + } + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + if (returnEnd) { + return (_currToken = t); + } + } + continue main_loop; + case ID_END_OBJECT: + { + boolean returnEnd = _headContext.isStartHandled(); + f = _headContext.getFilter(); + if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { + boolean includeEmpty = f.includeEmptyArray(_headContext.hasCurrentName()); + f.filterFinishObject(); + if (includeEmpty) { + return _nextBuffered(_headContext); + } } + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + if (returnEnd) { + return (_currToken = t); + } + } + continue main_loop; + + case ID_FIELD_NAME: + { + final String name = delegate.getCurrentName(); + f = _headContext.setFieldName(name); + if (f == TokenFilter.INCLUDE_ALL) { + _itemFilter = f; + return (_currToken = t); + } + if (f == null) { // filter out the value + delegate.nextToken(); + delegate.skipChildren(); + continue main_loop; + } + f = f.includeProperty(name); + if (f == null) { // filter out the value + delegate.nextToken(); + delegate.skipChildren(); + continue main_loop; + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + if (_verifyAllowedMatches()) { + if (_inclusion == 
Inclusion.INCLUDE_ALL_AND_PATH) { + return (_currToken = t); + } + } else { + delegate.nextToken(); + delegate.skipChildren(); + } + continue main_loop; + } + if (_inclusion != Inclusion.ONLY_INCLUDE_ALL) { + t = _nextTokenWithBuffering(_headContext); + if (t != null) { + _currToken = t; + return t; + } + } + } + continue main_loop; + + default: // scalar value + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + return (_currToken = t); + } + if (f != null) { + f = _headContext.checkValue(f); + if ((f == TokenFilter.INCLUDE_ALL) + || ((f != null) && f.includeValue(delegate))) { + if (_verifyAllowedMatches()) { + return (_currToken = t); + } + } + } + // Otherwise not included (leaves must be explicitly included) + break; + } + } + } + + // Method called when a new potentially included context is found. + protected final JsonToken _nextTokenWithBuffering(final TokenFilterContext buffRoot) + throws IOException + { + main_loop: + while (true) { + JsonToken t = delegate.nextToken(); + if (t == null) { // is this even legal? + return t; + } + TokenFilter f; + + // One simplification here: we know for a fact that the item filter is + // neither null nor 'include all', for most cases; the only exception + // being FIELD_NAME handling + + switch (t.id()) { + case ID_START_ARRAY: + f = _headContext.checkValue(_itemFilter); + if (f == null) { + delegate.skipChildren(); + continue main_loop; + } + if (f != TokenFilter.INCLUDE_ALL) { + f = f.filterStartArray(); + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildArrayContext(f, true); + return _nextBuffered(buffRoot); + } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) { + // TODO don't count as match? + _headContext = _headContext.createChildArrayContext(f, true); + return _nextBuffered(buffRoot); + } + _headContext = _headContext.createChildArrayContext(f, false); + continue main_loop; + + case ID_START_OBJECT: + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildObjectContext(f, true); + return t; + } + if (f == null) { // does this occur? + delegate.skipChildren(); + continue main_loop; + } + // Otherwise still iffy, need to check + f = _headContext.checkValue(f); + if (f == null) { + delegate.skipChildren(); + continue main_loop; + } + if (f != TokenFilter.INCLUDE_ALL) { + f = f.filterStartObject(); + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + _headContext = _headContext.createChildObjectContext(f, true); + return _nextBuffered(buffRoot); + } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) { + // TODO don't count as match? 
+ _headContext = _headContext.createChildArrayContext(f, true); + return _nextBuffered(buffRoot); + } + _headContext = _headContext.createChildObjectContext(f, false); + continue main_loop; + + case ID_END_ARRAY: + { + // Unlike with other loops, here we know that content was NOT + // included (won't get this far otherwise) + f = _headContext.getFilter(); + if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { + boolean includeEmpty = f.includeEmptyArray(_headContext.hasCurrentIndex()); + f.filterFinishArray(); + if (includeEmpty) { + return _nextBuffered(buffRoot); + } + } + boolean gotEnd = (_headContext == buffRoot); + boolean returnEnd = gotEnd && _headContext.isStartHandled(); + + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + + if (returnEnd) { + return t; + } + if (gotEnd) { + return null; + } + } + continue main_loop; + case ID_END_OBJECT: + { + // Unlike with other loops, here we know that content was NOT + // included (won't get this far otherwise) + f = _headContext.getFilter(); + if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) { + boolean includeEmpty = f.includeEmptyObject(_headContext.hasCurrentName()); + f.filterFinishObject(); + if (includeEmpty) { + _headContext._currentName = _headContext._parent == null + ? null + : _headContext._parent._currentName; + _headContext._needToHandleName = false; + return _nextBuffered(buffRoot); + } + } + boolean gotEnd = (_headContext == buffRoot); + boolean returnEnd = gotEnd && _headContext.isStartHandled(); + + _headContext = _headContext.getParent(); + _itemFilter = _headContext.getFilter(); + + if (returnEnd) { + return t; + } + if (gotEnd) { + return null; + } + } + continue main_loop; + + case ID_FIELD_NAME: + { + final String name = delegate.getCurrentName(); + f = _headContext.setFieldName(name); + if (f == TokenFilter.INCLUDE_ALL) { + _itemFilter = f; + return _nextBuffered(buffRoot); + } + if (f == null) { // filter out the value + delegate.nextToken(); + delegate.skipChildren(); + continue main_loop; + } + f = f.includeProperty(name); + if (f == null) { // filter out the value + delegate.nextToken(); + delegate.skipChildren(); + continue main_loop; + } + _itemFilter = f; + if (f == TokenFilter.INCLUDE_ALL) { + if (_verifyAllowedMatches()) { + return _nextBuffered(buffRoot); + } else { + // edge case: if no more matches allowed, reset filter + // to initial state to prevent missing a token in next iteration + _itemFilter = _headContext.setFieldName(name); + } + } + } + continue main_loop; + + default: // scalar value + f = _itemFilter; + if (f == TokenFilter.INCLUDE_ALL) { + return _nextBuffered(buffRoot); + } + if (f != null) { + f = _headContext.checkValue(f); + if ((f == TokenFilter.INCLUDE_ALL) + || ((f != null) && f.includeValue(delegate))) { + if (_verifyAllowedMatches()) { + return _nextBuffered(buffRoot); + } + } + } + // Otherwise not included (leaves must be explicitly included) + continue main_loop; + } + } + } + + private JsonToken _nextBuffered(TokenFilterContext buffRoot) throws IOException + { + _exposedContext = buffRoot; + TokenFilterContext ctxt = buffRoot; + JsonToken t = ctxt.nextTokenToRead(); + if (t != null) { + return t; + } + while (true) { + // all done with buffered stuff? 
+ if (ctxt == _headContext) { + throw _constructError("Internal error: failed to locate expected buffered tokens"); + /* + _exposedContext = null; + break; + */ + } + // If not, traverse down the context chain + ctxt = _exposedContext.findChildOf(ctxt); + _exposedContext = ctxt; + if (ctxt == null) { // should never occur + throw _constructError("Unexpected problem: chain of filtered context broken"); + } + t = _exposedContext.nextTokenToRead(); + if (t != null) { + return t; + } + } + } + + private final boolean _verifyAllowedMatches() throws IOException { + if (_matchCount == 0 || _allowMultipleMatches) { + ++_matchCount; + return true; + } + return false; + } + + @Override + public JsonToken nextValue() throws IOException { + // Re-implemented same as ParserMinimalBase: + JsonToken t = nextToken(); + if (t == JsonToken.FIELD_NAME) { + t = nextToken(); + } + return t; + } + + /** + * Need to override, re-implement similar to how method defined in + * {@link com.fasterxml.jackson.core.base.ParserMinimalBase}, to keep + * state correct here. + */ + @Override + public JsonParser skipChildren() throws IOException + { + if ((_currToken != JsonToken.START_OBJECT) + && (_currToken != JsonToken.START_ARRAY)) { + return this; + } + int open = 1; + + // Since proper matching of start/end markers is handled + // by nextToken(), we'll just count nesting levels here + while (true) { + JsonToken t = nextToken(); + if (t == null) { // not ideal but for now, just return + return this; + } + if (t.isStructStart()) { + ++open; + } else if (t.isStructEnd()) { + if (--open == 0) { + return this; + } + } + } + } + + /* + /********************************************************** + /* Public API, access to token information, text + /********************************************************** + */ + + // 19-Jul-2021, tatu: Cannot quite just delegate these methods due to oddity + // of property name token, which may be buffered. 
+ + @Override public String getText() throws IOException { + if (_currToken == JsonToken.FIELD_NAME) { + return currentName(); + } + return delegate.getText(); + } + + @Override public boolean hasTextCharacters() { + if (_currToken == JsonToken.FIELD_NAME) { + return false; + } + return delegate.hasTextCharacters(); + } + + @Override public char[] getTextCharacters() throws IOException { + // Not optimal but is correct, unlike delegating (as underlying stream + // may point to something else due to buffering) + if (_currToken == JsonToken.FIELD_NAME) { + return currentName().toCharArray(); + } + return delegate.getTextCharacters(); + } + + @Override public int getTextLength() throws IOException { + if (_currToken == JsonToken.FIELD_NAME) { + return currentName().length(); + } + return delegate.getTextLength(); + } + @Override public int getTextOffset() throws IOException { + if (_currToken == JsonToken.FIELD_NAME) { + return 0; + } + return delegate.getTextOffset(); + } + + /* + /********************************************************** + /* Public API, access to token information, numeric + /********************************************************** + */ + + @Override + public BigInteger getBigIntegerValue() throws IOException { return delegate.getBigIntegerValue(); } + + @Override + public boolean getBooleanValue() throws IOException { return delegate.getBooleanValue(); } + + @Override + public byte getByteValue() throws IOException { return delegate.getByteValue(); } + + @Override + public short getShortValue() throws IOException { return delegate.getShortValue(); } + + @Override + public BigDecimal getDecimalValue() throws IOException { return delegate.getDecimalValue(); } + + @Override + public double getDoubleValue() throws IOException { return delegate.getDoubleValue(); } + + @Override + public float getFloatValue() throws IOException { return delegate.getFloatValue(); } + + @Override + public int getIntValue() throws IOException { return delegate.getIntValue(); } + + @Override + public long getLongValue() throws IOException { return delegate.getLongValue(); } + + @Override + public NumberType getNumberType() throws IOException { return delegate.getNumberType(); } + + @Override + public Number getNumberValue() throws IOException { return delegate.getNumberValue(); } + + /* + /********************************************************** + /* Public API, access to token information, coercion/conversion + /********************************************************** + */ + + @Override public int getValueAsInt() throws IOException { return delegate.getValueAsInt(); } + @Override public int getValueAsInt(int defaultValue) throws IOException { return delegate.getValueAsInt(defaultValue); } + @Override public long getValueAsLong() throws IOException { return delegate.getValueAsLong(); } + @Override public long getValueAsLong(long defaultValue) throws IOException { return delegate.getValueAsLong(defaultValue); } + @Override public double getValueAsDouble() throws IOException { return delegate.getValueAsDouble(); } + @Override public double getValueAsDouble(double defaultValue) throws IOException { return delegate.getValueAsDouble(defaultValue); } + @Override public boolean getValueAsBoolean() throws IOException { return delegate.getValueAsBoolean(); } + @Override public boolean getValueAsBoolean(boolean defaultValue) throws IOException { return delegate.getValueAsBoolean(defaultValue); } + + @Override public String getValueAsString() throws IOException { + if (_currToken == 
JsonToken.FIELD_NAME) { + return currentName(); + } + return delegate.getValueAsString(); + } + @Override public String getValueAsString(String defaultValue) throws IOException { + if (_currToken == JsonToken.FIELD_NAME) { + return currentName(); + } + return delegate.getValueAsString(defaultValue); + } + + /* + /********************************************************** + /* Public API, access to token values, other + /********************************************************** + */ + + @Override public Object getEmbeddedObject() throws IOException { return delegate.getEmbeddedObject(); } + @Override public byte[] getBinaryValue(Base64Variant b64variant) throws IOException { return delegate.getBinaryValue(b64variant); } + @Override public int readBinaryValue(Base64Variant b64variant, OutputStream out) throws IOException { return delegate.readBinaryValue(b64variant, out); } + @Override public JsonLocation getTokenLocation() { return delegate.getTokenLocation(); } + + /* + /********************************************************** + /* Internal helper methods + /********************************************************** + */ + + protected JsonStreamContext _filterContext() { + if (_exposedContext != null) { + return _exposedContext; + } + return _headContext; + } +} diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java index 9dd5975cc1659..90555c6fed455 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java @@ -334,6 +334,61 @@ private void testFilter(Builder expected, Builder sample, Set includes, assertFilterResult(expected.apply(createBuilder()), filter(sample, includes, excludes, matchFieldNamesWithDots)); } + public void testArrayWithEmptyObjectInInclude() throws IOException { + testFilter( + builder -> builder.startObject().startArray("foo").startObject().field("bar", "baz").endObject().endArray().endObject(), + builder -> builder.startObject() + .startArray("foo") + .startObject() + .field("bar", "baz") + .endObject() + .startObject() + .endObject() + .endArray() + .endObject(), + singleton("foo.bar"), + emptySet(), + true + ); + } + + public void testArrayWithEmptyArrayInInclude() throws IOException { + testFilter( + builder -> builder.startObject().startArray("foo").startObject().field("bar", "baz").endObject().endArray().endObject(), + builder -> builder.startObject() + .startArray("foo") + .startObject() + .field("bar", "baz") + .endObject() + .startArray() + .endArray() + .endArray() + .endObject(), + singleton("foo.bar"), + emptySet(), + true + ); + } + + public void testArrayWithLastObjectSkipped() throws IOException { + testFilter( + builder -> builder.startObject().startArray("foo").startObject().field("bar", "baz").endObject().endArray().endObject(), + builder -> builder.startObject() + .startArray("foo") + .startObject() + .field("bar", "baz") + .endObject() + .startObject() + .field("skipped", "value") + .endObject() + .endArray() + .endObject(), + singleton("foo.bar"), + emptySet(), + true + ); + } + protected abstract void assertFilterResult(XContentBuilder expected, XContentBuilder actual); protected abstract XContentType getXContentType(); From f60d614dc09cdfc04dc49cbc35894b69159609c5 Mon Sep 17 
00:00:00 2001
From: Mark Vieira
Date: Wed, 18 Jan 2023 07:56:56 -0800
Subject: [PATCH 10/13] Remove Gradle read-only cache registration

The simple `cp -R` implementation seemed to cause corruption issues and
not all of our CI worker images have rsync installed. We'll need to sort
something else out, which might just be relying on per-BWC branch cache
directories for now.
---
 .ci/packer_cache.sh | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh
index 3934df539d351..43d1c5a82b90d 100755
--- a/.ci/packer_cache.sh
+++ b/.ci/packer_cache.sh
@@ -44,9 +44,3 @@ fi
 ## therefore we run main _AFTER_ we run 6.8 which uses an earlier gradle version
 export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA}
 ./gradlew --parallel clean -s resolveAllDependencies -Dorg.gradle.warning.mode=none -Drecurse.bwc=true
-
-## Copy all dependencies into a "read-only" location to be used by nested Gradle builds
-mkdir -p ${HOME}/gradle_ro_cache
-rsync -r ${HOME}/.gradle/caches/modules-2 ${HOME}/gradle_ro_cache
-rm ${HOME}/gradle_ro_cache/modules-2/gc.properties
-rm ${HOME}/gradle_ro_cache/modules-2/*.lock

From 8f37934a762dd3cb6fe99f2ba68a0795012e61ea Mon Sep 17 00:00:00 2001
From: Przemyslaw Gomulka
Date: Wed, 18 Jan 2023 16:59:09 +0100
Subject: [PATCH 11/13] Exclude the class from jackson jar (#93052)

in #92984 we override a file in jackson jar, but we rely on gradle
internals which might change at any point. This fixes this by excluding
a element from a jar and allowing a new class to be added
---
 libs/x-content/impl/es-jackson-core/build.gradle | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/libs/x-content/impl/es-jackson-core/build.gradle b/libs/x-content/impl/es-jackson-core/build.gradle
index 7e9d8c56c0ad8..7c32f7bdf5b99 100644
--- a/libs/x-content/impl/es-jackson-core/build.gradle
+++ b/libs/x-content/impl/es-jackson-core/build.gradle
@@ -26,6 +26,9 @@ tasks.named("dependencyLicenses").configure {
 }
 
 shadowJar {
+  exclude { element ->
+    element.file == null && element.path.endsWith("FilteringParserDelegate.class")
+  }
   manifest {
     attributes 'Multi-Release' : 'true'
   }

From 89fb4d641f8a2cc9c6f824278c98d0a6ca4efd1a Mon Sep 17 00:00:00 2001
From: Alan Woodward
Date: Wed, 18 Jan 2023 16:08:19 +0000
Subject: [PATCH 12/13] Move nested object redundant include fix into
 NestedObjectMapper (#92876)

In order to stop deeply nested mappers indexing their content multiple
times, we have some logic in RootObjectMapper that finds 'redundant'
includes - that is, if a nested object mapper has both 'includeInRoot'
and 'includeInParent' set, and its parent is already included in root,
then we remove its 'includeInRoot' setting. This is currently done with
an explicit fixRedundantIncludes() call directly on the root object
mapper after it has been constructed. In the past, bugs have been caused
by this call being missed, and it would be neater if this logic was
handled internally by NestedObjectMapper itself.

This commit adds a new MapperBuilderContext, private to
NestedObjectMapper, that holds information about the include status of
a nested parent. Fixing of includes is now done entirely within
NestedObjectMapper itself, either in the builder or in its merge logic.
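As a concrete illustration of the redundancy described above, a hedged sketch in the style of this patch's own tests (the field names are invented, and it assumes the createMapperService helper used by NestedObjectMapperTests):

// Hypothetical test body: a doubly nested mapping where the child sets both
// include_in_parent and include_in_root, and its parent is already included in root.
MapperService mapperService = createMapperService("""
    { "_doc" : { "properties" : {
        "parent" : {
            "type" : "nested",
            "include_in_root" : true,
            "properties" : {
                "child" : {
                    "type" : "nested",
                    "include_in_parent" : true,
                    "include_in_root" : true
                }
            }
        }
    }}}
    """);
// The child's include_in_root is redundant: its fields already reach the root via
// include_in_parent plus the parent's include_in_root. Previously RootObjectMapper
// rewrote this after construction via fixRedundantIncludes(); with this change the
// private NestedMapperBuilderContext carries the parent's include status, so the
// builder (and the merge logic) downgrade the redundant include_in_root themselves.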
--- .../index/mapper/DocumentParser.java | 1 - .../index/mapper/MapperBuilderContext.java | 2 +- .../index/mapper/MapperService.java | 1 - .../index/mapper/NestedObjectMapper.java | 51 ++++++++++++- .../index/mapper/RootObjectMapper.java | 30 -------- .../index/mapper/NestedObjectMapperTests.java | 75 +++++++++++++++++++ .../index/mapper/ObjectMapperMergeTests.java | 22 ------ 7 files changed, 125 insertions(+), 57 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index b985281860b4e..4a25179479dcf 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -232,7 +232,6 @@ static Mapping createDynamicUpdate(DocumentParserContext context) { rootBuilder.addRuntimeField(runtimeField); } RootObjectMapper root = rootBuilder.build(MapperBuilderContext.root(context.mappingLookup().isSourceSynthetic())); - root.fixRedundantIncludes(); return context.mappingLookup().getMapping().mappingUpdate(root); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java index 8ed6bf3d1db7e..f1daf17f3ee69 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java @@ -16,7 +16,7 @@ /** * Holds context for building Mapper objects from their Builders */ -public final class MapperBuilderContext { +public class MapperBuilderContext { /** * The root context, to be used when building a tree of mappers diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index cbbc0ec7ab288..7a96d7c8e7d4f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -352,7 +352,6 @@ public DocumentMapper merge(String type, CompressedXContent mappingSource, Merge private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason, CompressedXContent mappingSource) { DocumentMapper newMapper = new DocumentMapper(documentParser, mapping, mappingSource); - newMapper.mapping().getRoot().fixRedundantIncludes(); newMapper.validate(indexSettings, reason != MergeReason.MAPPING_RECOVERY); return newMapper; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 45f1363fb1a36..b82c13574e14b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -49,7 +49,21 @@ Builder includeInParent(boolean includeInParent) { @Override public NestedObjectMapper build(MapperBuilderContext context) { - return new NestedObjectMapper(name, context.buildFullName(name), buildMappers(context.createChildContext(name)), this); + boolean parentIncludedInRoot = this.includeInRoot.value(); + if (context instanceof NestedMapperBuilderContext nc) { + // we're already inside a nested mapper, so adjust our includes + if (nc.parentIncludedInRoot && this.includeInParent.value()) { + this.includeInRoot = Explicit.IMPLICIT_FALSE; + } + } else { + // this is a top-level nested mapper, so 
@@ -89,6 +103,21 @@ protected static void parseNested(String name, Map<String, Object> node, NestedObjectMapper.Builder builder) {
         }
     }
 
+    private static class NestedMapperBuilderContext extends MapperBuilderContext {
+
+        final boolean parentIncludedInRoot;
+
+        NestedMapperBuilderContext(String path, boolean parentIncludedInRoot) {
+            super(path, false);
+            this.parentIncludedInRoot = parentIncludedInRoot;
+        }
+
+        @Override
+        public MapperBuilderContext createChildContext(String name) {
+            return new NestedMapperBuilderContext(buildFullName(name), parentIncludedInRoot);
+        }
+    }
+
     private Explicit<Boolean> includeInRoot;
     private Explicit<Boolean> includeInParent;
     private final String nestedTypePath;
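The Explicit.IMPLICIT_FALSE value used throughout this change matters for serialization: dropping a redundant include_in_root must not look as if the user had explicitly set it to false. A rough sketch of the idea behind such a wrapper (field and constant names are illustrative, not the actual Explicit implementation):

```java
// Illustrative model of an explicit-vs-implicit flag: only values the user
// actually set (explicit == true) should be written back out during
// mapping serialization; implicit defaults are skipped.
record ExplicitBool(boolean value, boolean explicit) {
    static final ExplicitBool IMPLICIT_FALSE = new ExplicitBool(false, false);

    static ExplicitBool explicitValue(boolean value) {
        return new ExplicitBool(value, true);
    }
}
```

This is also why the toXContent hunk that follows now checks includeInParent.explicit() before emitting the field.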
@@ -153,7 +182,7 @@ public ObjectMapper.Builder newBuilder(Version indexVersionCreated) {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(simpleName());
         builder.field("type", CONTENT_TYPE);
-        if (includeInParent.value()) {
+        if (includeInParent.explicit() && includeInParent.value()) {
             builder.field("include_in_parent", includeInParent.value());
         }
         if (includeInRoot.value()) {
@@ -191,10 +220,28 @@ public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, MapperBuilderContext parentBuilderContext) {
                 throw new MapperException("the [include_in_root] parameter can't be updated on a nested object mapping");
             }
         }
+        if (parentBuilderContext instanceof NestedMapperBuilderContext nc) {
+            if (nc.parentIncludedInRoot && toMerge.includeInParent.value()) {
+                toMerge.includeInRoot = Explicit.IMPLICIT_FALSE;
+            }
+        } else {
+            if (toMerge.includeInParent.value()) {
+                toMerge.includeInRoot = Explicit.IMPLICIT_FALSE;
+            }
+        }
         toMerge.doMerge(mergeWithObject, reason, parentBuilderContext);
         return toMerge;
     }
 
+    @Override
+    protected MapperBuilderContext createChildContext(MapperBuilderContext mapperBuilderContext, String name) {
+        boolean parentIncludedInRoot = this.includeInRoot.value();
+        if (mapperBuilderContext instanceof NestedMapperBuilderContext == false) {
+            parentIncludedInRoot |= this.includeInParent.value();
+        }
+        return new NestedMapperBuilderContext(mapperBuilderContext.buildFullName(name), parentIncludedInRoot);
+    }
+
     @Override
     public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() {
         throw new IllegalArgumentException("field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source");
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
index 9e5516b978ee2..96d494a44b8f6 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
@@ -120,36 +120,6 @@ public RootObjectMapper build(MapperBuilderContext context) {
         }
     }
 
-    /**
-     * Removes redundant root includes in {@link NestedObjectMapper} trees to avoid duplicate
-     * fields on the root mapper when {@code isIncludeInRoot} is {@code true} for a node that is
-     * itself included into a parent node, for which either {@code isIncludeInRoot} is
-     * {@code true} or which is transitively included in root by a chain of nodes with
-     * {@code isIncludeInParent} returning {@code true}.
-     */
-    // TODO it would be really nice to make this an implementation detail of NestedObjectMapper
-    // and run it as part of the builder, but this does not yet work because of the way that
-    // index templates are merged together. If merge() was run on Builder objects rather than
-    // on Mappers then we could move this.
-    public void fixRedundantIncludes() {
-        fixRedundantIncludes(this, true);
-    }
-
-    private static void fixRedundantIncludes(ObjectMapper objectMapper, boolean parentIncluded) {
-        for (Mapper mapper : objectMapper) {
-            if (mapper instanceof NestedObjectMapper child) {
-                boolean isNested = child.isNested();
-                boolean includeInRootViaParent = parentIncluded && isNested && child.isIncludeInParent();
-                boolean includedInRoot = isNested && child.isIncludeInRoot();
-                if (includeInRootViaParent && includedInRoot) {
-                    child.setIncludeInParent(true);
-                    child.setIncludeInRoot(false);
-                }
-                fixRedundantIncludes(child, includeInRootViaParent || includedInRoot);
-            }
-        }
-    }
-
     private Explicit<Boolean> dynamicDateTimeFormatters;
     private Explicit<Boolean> dateDetection;
     private Explicit<Boolean> numericDetection;
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
index d106908fad5d3..a8b804017d1cf 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java
@@ -1439,4 +1439,79 @@ public void testNestedDoesNotSupportSubobjectsParameter() {
         );
         assertEquals("Failed to parse mapping: Nested type [nested1] does not support [subobjects] parameter", exception.getMessage());
     }
+
+    public void testIndexTemplatesMergeIncludes() throws IOException {
+        {
+            MapperService mapperService = createMapperService("""
+                { "_doc" : { "properties" : {
+                    "field" : {
+                        "type" : "nested",
+                        "include_in_root" : true,
+                        "properties" : {
+                            "text" : { "type" : "text" }
+                        }
+                    }
+                }}}
+                """);
+            merge(mapperService, MergeReason.INDEX_TEMPLATE, """
+                { "_doc" : { "properties" : {
+                    "field" : {
+                        "type" : "nested",
+                        "include_in_parent" : true,
+                        "properties" : {
+                            "text" : { "type" : "text" }
+                        }
+                    }
+                }}}
+                """);
+            assertThat(Strings.toString(mapperService.documentMapper().mapping()), containsString("""
+                {"type":"nested","include_in_parent":true,"properties":{"""));
+        }
+        {
+            MapperService mapperService = createMapperService("""
+                { "_doc" : { "properties" : {
+                    "field" : {
+                        "type" : "nested",
+                        "include_in_parent" : true,
+                        "properties" : {
+                            "text" : { "type" : "text" }
+                        }
+                    }
+                }}}
+                """);
+            merge(mapperService, MergeReason.INDEX_TEMPLATE, """
+                { "_doc" : { "properties" : {
+                    "field" : {
+                        "type" : "nested",
+                        "include_in_root" : true,
+                        "properties" : {
+                            "text" : { "type" : "text" }
+                        }
+                    }
+                }}}
+                """);
+            assertThat(Strings.toString(mapperService.documentMapper().mapping()), containsString("""
+                {"type":"nested","include_in_parent":true,"properties":{"""));
+        }
+    }
+
+    public void testMergeNested() {
+        NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(true)
+            .includeInRoot(true)
+            .build(MapperBuilderContext.root(false));
+        NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(false)
+            .includeInRoot(true)
+            .build(MapperBuilderContext.root(false));
+
+        MapperException e = expectThrows(MapperException.class, () -> firstMapper.merge(secondMapper, MapperBuilderContext.root(false)));
+        assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping"));
+
+        NestedObjectMapper result = (NestedObjectMapper) firstMapper.merge(
+            secondMapper,
+            MapperService.MergeReason.INDEX_TEMPLATE,
+            MapperBuilderContext.root(false)
+        );
+        assertFalse(result.isIncludeInParent());
+        assertTrue(result.isIncludeInRoot());
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java
index 089e1692a1a01..5749ce4650b1c 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java
@@ -13,8 +13,6 @@
 
 import java.util.Collections;
 
-import static org.hamcrest.Matchers.containsString;
-
 public class ObjectMapperMergeTests extends ESTestCase {
 
     private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false);
@@ -117,26 +115,6 @@ public void testMergeDisabledRootMapper() {
         assertEquals("test", merged.runtimeFields().iterator().next().name());
     }
 
-    public void testMergeNested() {
-        NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(true)
-            .includeInRoot(true)
-            .build(MapperBuilderContext.root(false));
-        NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(false)
-            .includeInRoot(true)
-            .build(MapperBuilderContext.root(false));
-
-        MapperException e = expectThrows(MapperException.class, () -> firstMapper.merge(secondMapper, MapperBuilderContext.root(false)));
-        assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping"));
-
-        NestedObjectMapper result = (NestedObjectMapper) firstMapper.merge(
-            secondMapper,
-            MapperService.MergeReason.INDEX_TEMPLATE,
-            MapperBuilderContext.root(false)
-        );
-        assertFalse(result.isIncludeInParent());
-        assertTrue(result.isIncludeInRoot());
-    }
-
     public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() {
         RootObjectMapper mergeInto = createRootSubobjectFalseLeafWithDots();
         RootObjectMapper mergeWith = createRootSubobjectFalseLeafWithDots();

From 394d4f0efa77173cde93ea1e5929131d28c9a6aa Mon Sep 17 00:00:00 2001
From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com>
Date: Wed, 18 Jan 2023 17:26:45 +0100
Subject: [PATCH 13/13] mute: FileSettingsRoleMappingsRestartIT (#93049)

---
 .../xpack/security/FileSettingsRoleMappingsRestartIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java
index 416af1d64d328..5ef6a2d2c97b9 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java
@@ -109,6 +109,7 @@ public void clusterChanged(ClusterChangedEvent event) {
         return new Tuple<>(savedClusterState, metadataVersion);
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/93048")
     public void testReservedStatePersistsOnRestart() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);