diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh
index 3934df539d351..43d1c5a82b90d 100755
--- a/.ci/packer_cache.sh
+++ b/.ci/packer_cache.sh
@@ -44,9 +44,3 @@ fi
## therefore we run main _AFTER_ we run 6.8 which uses an earlier gradle version
export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA}
./gradlew --parallel clean -s resolveAllDependencies -Dorg.gradle.warning.mode=none -Drecurse.bwc=true
-
-## Copy all dependencies into a "read-only" location to be used by nested Gradle builds
-mkdir -p ${HOME}/gradle_ro_cache
-rsync -r ${HOME}/.gradle/caches/modules-2 ${HOME}/gradle_ro_cache
-rm ${HOME}/gradle_ro_cache/modules-2/gc.properties
-rm ${HOME}/gradle_ro_cache/modules-2/*.lock
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle
new file mode 100644
index 0000000000000..c52bd9d1d52c7
--- /dev/null
+++ b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle
@@ -0,0 +1,50 @@
+import org.apache.tools.ant.taskdefs.condition.Os
+import org.elasticsearch.gradle.Version
+import org.elasticsearch.gradle.VersionProperties
+import org.elasticsearch.gradle.internal.BwcVersions
+import org.elasticsearch.gradle.internal.JarApiComparisonTask
+import org.elasticsearch.gradle.internal.info.BuildParams
+
+import static org.elasticsearch.gradle.internal.InternalDistributionBwcSetupPlugin.buildBwcTaskName
+
+configurations {
+ newJar
+}
+
+dependencies {
+ newJar project(":libs:${project.name}")
+}
+
+BuildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince))
+ && it != VersionProperties.elasticsearchVersion
+}) { bwcVersion, baseName ->
+
+ BwcVersions.UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion)
+
+ configurations {
+ "oldJar${baseName}" {
+ transitive = false
+ }
+ }
+
+ dependencies {
+ if (unreleasedVersion) {
+ // For unreleased snapshot versions, build them from source
+ "oldJar${baseName}"(files(project(unreleasedVersion.gradleProjectPath).tasks.named(buildBwcTaskName(project.name))))
+ } else {
+ // For released versions, download it
+ "oldJar${baseName}"("org.elasticsearch:${project.name}:${bwcVersion}")
+ }
+ }
+
+ def jarApiComparisonTask = tasks.register(bwcTaskName(bwcVersion), JarApiComparisonTask) {
+ oldJar = configurations."oldJar${baseName}"
+ newJar = configurations.newJar
+ }
+
+ jarApiComparisonTask.configure {
+ onlyIf {
+ !Os.isFamily(Os.FAMILY_WINDOWS)
+ }
+ }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
index 1f26b8e31ebcf..a32358c6db4f6 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java
@@ -27,6 +27,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
+import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
@@ -120,6 +121,35 @@ private void configureBwcProject(Project project, BwcVersions.UnreleasedVersionI
buildBwcTaskProvider,
"assemble"
);
+
+ // for versions before 8.7.0, we do not need to set up stable API bwc
+ if (bwcVersion.get().before(Version.fromString("8.7.0"))) {
+ return;
+ }
+
+ for (Project stableApiProject : resolveStableProjects(project)) {
+
+ String relativeDir = project.getRootProject().relativePath(stableApiProject.getProjectDir());
+
+ DistributionProjectArtifact stableAnalysisPluginProjectArtifact = new DistributionProjectArtifact(
+ new File(
+ checkoutDir.get(),
+ relativeDir + "/build/distributions/" + stableApiProject.getName() + "-" + bwcVersion.get() + "-SNAPSHOT.jar"
+ ),
+ null
+ );
+
+ createBuildBwcTask(
+ bwcSetupExtension,
+ project,
+ bwcVersion,
+ stableApiProject.getName(),
+ "libs/" + stableApiProject.getName(),
+ stableAnalysisPluginProjectArtifact,
+ buildBwcTaskProvider,
+ "assemble"
+ );
+ }
}
private void registerBwcDistributionArtifacts(Project bwcProject, DistributionProject distributionProject) {
@@ -209,7 +239,16 @@ private static List
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java
new file mode 100644
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java
+ * While the above assumptions appear to hold, they are not guaranteed, and hence
+ * brittle. We could overcome these problems with an ASM implementation of the
+ * Jar Scanner.
+ *
+ * We also assume that we will not be comparing multi-version JARs.
+ *
+ * This "javap" approach has a few further drawbacks:
+ *
+ *
+ */
+@CacheableTask
+public abstract class JarApiComparisonTask extends PrecommitTask {
+
+ @TaskAction
+ public void compare() {
+ FileCollection fileCollection = getOldJar().get();
+ File newJarFile = getNewJar().get().getSingleFile();
+
+ Set
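The comparison strategy described in the class javadoc above can be illustrated with a small, self-contained sketch. This is hypothetical code, not the body of JarApiComparisonTask: it shells out to javap for each class in a jar, keeps the member signature lines that javap prints, and reports any old signatures missing from the new jar:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.util.Enumeration;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    // Hypothetical sketch of the javap strategy; not the real JarApiComparisonTask.
    class JavapSignatureSketch {

        // Collect the member signature lines that javap prints for every class in the jar.
        static Set<String> apiSignatures(File jar) throws IOException, InterruptedException {
            Set<String> signatures = new HashSet<>();
            try (JarFile jarFile = new JarFile(jar)) {
                Enumeration<JarEntry> entries = jarFile.entries();
                while (entries.hasMoreElements()) {
                    JarEntry entry = entries.nextElement();
                    if (entry.getName().endsWith(".class") == false) {
                        continue;
                    }
                    String className = entry.getName().replace('/', '.').replaceAll("\\.class$", "");
                    // -public limits the dump to public classes and members
                    Process javap = new ProcessBuilder("javap", "-public", "-classpath", jar.getPath(), className).start();
                    try (BufferedReader reader = new BufferedReader(new InputStreamReader(javap.getInputStream()))) {
                        // javap prints one member per line, terminated by ';'
                        reader.lines().map(String::trim).filter(line -> line.endsWith(";")).forEach(signatures::add);
                    }
                    javap.waitFor();
                }
            }
            return signatures;
        }

        // The old API survives in the new jar when no old signature has disappeared.
        static Set<String> removedSignatures(Set<String> oldApi, Set<String> newApi) {
            Set<String> removed = new HashSet<>(oldApi);
            removed.removeAll(newApi);
            return removed;
        }
    }

In line with the caveats above, anything such a sketch reports depends entirely on javap's textual output format, which is exactly the brittleness that motivates the suggested ASM-based alternative.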
+ /**
+ * Last token retrieved via {@link #nextToken}, if any.
+ * Null before the first call to nextToken(),
+ * as well as if token has been explicitly cleared
+ */
+ protected JsonToken _currToken;
+
+ /**
+ * Last cleared token, if any: that is, value that was in
+ * effect when {@link #clearCurrentToken} was called.
+ */
+ protected JsonToken _lastClearedToken;
+
+ /**
+ * During traversal this is the actual "open" parse tree, which sometimes
+ * is the same as {@link #_exposedContext}, and at other times is ahead
+ * of it. Note that this context is never null.
+ */
+ protected TokenFilterContext _headContext;
+
+ /**
+ * In cases where {@link #_headContext} is "ahead" of context exposed to
+ * caller, this context points to what is currently exposed to caller.
+ * When the two are in sync, this context reference will be null.
+ */
+ protected TokenFilterContext _exposedContext;
+
+ /**
+ * State that applies to the item within container, used where applicable.
+ * Specifically used to pass inclusion state between property name and
+ * property, and also used for array elements.
+ */
+ protected TokenFilter _itemFilter;
+
+ /**
+ * Number of tokens for which {@link TokenFilter#INCLUDE_ALL}
+ * has been returned.
+ */
+ protected int _matchCount;
+
+ /*
+ /**********************************************************
+ /* Construction, initialization
+ /**********************************************************
+ */
+
+ @Deprecated
+ public FilteringParserDelegate(JsonParser p, TokenFilter f,
+ boolean includePath, boolean allowMultipleMatches)
+ {
+ this(p, f, includePath ? Inclusion.INCLUDE_ALL_AND_PATH : Inclusion.ONLY_INCLUDE_ALL, allowMultipleMatches);
+ }
+
+ /**
+ * @param p Parser to delegate calls to
+ * @param f Filter to use
+ * @param inclusion Definition of inclusion criteria
+ * @param allowMultipleMatches Whether to allow multiple matches
+ */
+ public FilteringParserDelegate(JsonParser p, TokenFilter f,
+ TokenFilter.Inclusion inclusion, boolean allowMultipleMatches)
+ {
+ super(p);
+ rootFilter = f;
+ // and this is the currently active filter for root values
+ _itemFilter = f;
+ _headContext = TokenFilterContext.createRootContext(f);
+ _inclusion = inclusion;
+ _allowMultipleMatches = allowMultipleMatches;
+ }
+
+ /*
+ /**********************************************************
+ /* Extended API
+ /**********************************************************
+ */
+
+ public TokenFilter getFilter() { return rootFilter; }
+
+ /**
+ * Accessor for the number of matches so far: a match is a token (or, for
+ * structured types, a sub-tree) for which the filter returned INCLUDE_ALL.
+ *
+ * @return Number of matches
+ */
+ public int getMatchCount() {
+ return _matchCount;
+ }
+
+ /*
+ /**********************************************************
+ /* Public API, token accessors
+ /**********************************************************
+ */
+
+ @Override public JsonToken getCurrentToken() { return _currToken; }
+ @Override public JsonToken currentToken() { return _currToken; }
+
+ @Deprecated // since 2.12
+ @Override public final int getCurrentTokenId() {
+ return currentTokenId();
+ }
+ @Override public final int currentTokenId() {
+ final JsonToken t = _currToken;
+ return (t == null) ? JsonTokenId.ID_NO_TOKEN : t.id();
+ }
+
+ @Override public boolean hasCurrentToken() { return _currToken != null; }
+ @Override public boolean hasTokenId(int id) {
+ final JsonToken t = _currToken;
+ if (t == null) {
+ return (JsonTokenId.ID_NO_TOKEN == id);
+ }
+ return t.id() == id;
+ }
+
+ @Override public final boolean hasToken(JsonToken t) {
+ return (_currToken == t);
+ }
+
+ @Override public boolean isExpectedStartArrayToken() { return _currToken == JsonToken.START_ARRAY; }
+ @Override public boolean isExpectedStartObjectToken() { return _currToken == JsonToken.START_OBJECT; }
+
+ @Override public JsonLocation getCurrentLocation() { return delegate.getCurrentLocation(); }
+
+ @Override
+ public JsonStreamContext getParsingContext() {
+ return _filterContext();
+ }
+
+ // !!! TODO: Verify it works as expected: copied from standard JSON parser impl
+ @Override
+ public String getCurrentName() throws IOException {
+ JsonStreamContext ctxt = _filterContext();
+ if (_currToken == JsonToken.START_OBJECT || _currToken == JsonToken.START_ARRAY) {
+ JsonStreamContext parent = ctxt.getParent();
+ return (parent == null) ? null : parent.getCurrentName();
+ }
+ return ctxt.getCurrentName();
+ }
+
+ // 2.13: IMPORTANT! Must override along with older getCurrentName()
+ @Override
+ public String currentName() throws IOException {
+ JsonStreamContext ctxt = _filterContext();
+ if (_currToken == JsonToken.START_OBJECT || _currToken == JsonToken.START_ARRAY) {
+ JsonStreamContext parent = ctxt.getParent();
+ return (parent == null) ? null : parent.getCurrentName();
+ }
+ return ctxt.getCurrentName();
+ }
+
+ /*
+ /**********************************************************
+ /* Public API, token state overrides
+ /**********************************************************
+ */
+
+ @Override
+ public void clearCurrentToken() {
+ if (_currToken != null) {
+ _lastClearedToken = _currToken;
+ _currToken = null;
+ }
+ }
+
+ @Override
+ public JsonToken getLastClearedToken() { return _lastClearedToken; }
+
+ @Override
+ public void overrideCurrentName(String name) {
+ // 14-Apr-2015, tatu: Not sure whether this can be supported, and if so,
+ // what to do with it... Delegation won't work for sure, so let's for
+ // now throw an exception
+ throw new UnsupportedOperationException("Can not currently override name during filtering read");
+ }
+
+ /*
+ /**********************************************************
+ /* Public API, traversal
+ /**********************************************************
+ */
+
+ @Override
+ public JsonToken nextToken() throws IOException
+ {
+ // 23-May-2017, tatu: To be honest, code here is rather hairy and I don't like all
+ // conditionals; and it seems odd to return `null` but NOT considering input
+ // as closed... would love a rewrite to simplify/clear up logic here.
+
+ // If multiple matches are not allowed (_allowMultipleMatches == false), we already have
+ // at least one token (_currToken != null), and there is no buffered context to replay
+ // (_exposedContext == null), then decide whether filtering ends here (scalar vs non-scalar)
+
+ if (!_allowMultipleMatches && (_currToken != null) && (_exposedContext == null)) {
+ // if scalar, and scalar not present in obj/array and _inclusion == ONLY_INCLUDE_ALL
+ // and INCLUDE_ALL matched once, return null
+ if (_currToken.isScalarValue() && !_headContext.isStartHandled()
+ && _inclusion == Inclusion.ONLY_INCLUDE_ALL
+ && (_itemFilter == TokenFilter.INCLUDE_ALL)) {
+ return (_currToken = null);
+ }
+ }
+ // Anything buffered?
+ TokenFilterContext ctxt = _exposedContext;
+
+ if (ctxt != null) {
+ while (true) {
+ JsonToken t = ctxt.nextTokenToRead();
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ // all done with buffered stuff?
+ if (ctxt == _headContext) {
+ _exposedContext = null;
+ if (ctxt.inArray()) {
+ t = delegate.getCurrentToken();
+ _currToken = t;
+ if (_currToken == JsonToken.END_ARRAY) {
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+ }
+ return t;
+ }
+
+ // 19-Jul-2021, tatu: [core#700]: following was commented out?!
+ // Almost! Most likely still have the current token;
+ // with the sole exception of FIELD_NAME
+ t = delegate.currentToken();
+ if (t == JsonToken.END_OBJECT) {
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+ }
+ if (t != JsonToken.FIELD_NAME) {
+ _currToken = t;
+ return t;
+ }
+ break;
+ }
+ // If not, traverse down the context chain
+ ctxt = _headContext.findChildOf(ctxt);
+ _exposedContext = ctxt;
+ if (ctxt == null) { // should never occur
+ throw _constructError("Unexpected problem: chain of filtered context broken");
+ }
+ }
+ }
+
+ // If not, need to read more. If we got any:
+ JsonToken t = delegate.nextToken();
+ if (t == null) {
+ // no strict need to close, since we have no state here
+ _currToken = t;
+ return t;
+ }
+
+ // otherwise... to include or not?
+ TokenFilter f;
+
+ switch (t.id()) {
+ case ID_START_ARRAY:
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return (_currToken = t);
+ }
+ if (f == null) { // does this occur?
+ delegate.skipChildren();
+ break;
+ }
+ // Otherwise still iffy, need to check
+ f = _headContext.checkValue(f);
+ if (f == null) {
+ delegate.skipChildren();
+ break;
+ }
+ if (f != TokenFilter.INCLUDE_ALL) {
+ f = f.filterStartArray();
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return (_currToken = t);
+ } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) {
+ // TODO don't count as match?
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return (_currToken = t);
+ }
+ _headContext = _headContext.createChildArrayContext(f, false);
+
+ // Also: only need buffering if parent path to be included
+ if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) {
+ t = _nextTokenWithBuffering(_headContext);
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ }
+ break;
+
+ case ID_START_OBJECT:
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return (_currToken = t);
+ }
+ if (f == null) { // does this occur?
+ delegate.skipChildren();
+ break;
+ }
+ // Otherwise still iffy, need to check
+ f = _headContext.checkValue(f);
+ if (f == null) {
+ delegate.skipChildren();
+ break;
+ }
+ if (f != TokenFilter.INCLUDE_ALL) {
+ f = f.filterStartObject();
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return (_currToken = t);
+ } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) {
+ // TODO don't count as match?
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return (_currToken = t);
+ }
+ _headContext = _headContext.createChildObjectContext(f, false);
+ // Also: only need buffering if parent path to be included
+ if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) {
+ t = _nextTokenWithBuffering(_headContext);
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ }
+ // note: inclusion of surrounding Object handled separately via
+ // FIELD_NAME
+ break;
+
+ case ID_END_ARRAY:
+ case ID_END_OBJECT:
+ {
+ boolean returnEnd = _headContext.isStartHandled();
+ f = _headContext.getFilter();
+ if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) {
+ f.filterFinishArray();
+ }
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+ if (returnEnd) {
+ return (_currToken = t);
+ }
+ }
+ break;
+
+ case ID_FIELD_NAME:
+ {
+ final String name = delegate.getCurrentName();
+ // note: this will also set 'needToHandleName'
+ f = _headContext.setFieldName(name);
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _itemFilter = f;
+ return (_currToken = t);
+ }
+ if (f == null) {
+ delegate.nextToken();
+ delegate.skipChildren();
+ break;
+ }
+ f = f.includeProperty(name);
+ if (f == null) {
+ delegate.nextToken();
+ delegate.skipChildren();
+ break;
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ if (_verifyAllowedMatches()) {
+ if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) {
+ return (_currToken = t);
+ }
+ } else {
+ delegate.nextToken();
+ delegate.skipChildren();
+ }
+ }
+ if (_inclusion != Inclusion.ONLY_INCLUDE_ALL) {
+ t = _nextTokenWithBuffering(_headContext);
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ }
+ break;
+ }
+
+ default: // scalar value
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ return (_currToken = t);
+ }
+ if (f != null) {
+ f = _headContext.checkValue(f);
+ if ((f == TokenFilter.INCLUDE_ALL)
+ || ((f != null) && f.includeValue(delegate))) {
+ if (_verifyAllowedMatches()) {
+ return (_currToken = t);
+ }
+ }
+ }
+ // Otherwise not included (leaves must be explicitly included)
+ break;
+ }
+
+ // We get here if token was not yet found; offlined handling
+ return _nextToken2();
+ }
+
+ // Offlined handling for cases where there was no buffered token to
+ // return, and the token read next could not be returned as-is,
+ // at least not yet, but where we have not yet established that
+ // buffering is needed.
+ protected final JsonToken _nextToken2() throws IOException
+ {
+ main_loop:
+ while (true) {
+ JsonToken t = delegate.nextToken();
+ if (t == null) { // is this even legal?
+ _currToken = t;
+ return t;
+ }
+ TokenFilter f;
+
+ switch (t.id()) {
+ case ID_START_ARRAY:
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return (_currToken = t);
+ }
+ if (f == null) { // does this occur?
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ // Otherwise still iffy, need to check
+ f = _headContext.checkValue(f);
+ if (f == null) {
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ if (f != TokenFilter.INCLUDE_ALL) {
+ f = f.filterStartArray();
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return (_currToken = t);
+ } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) {
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return (_currToken = t);
+ }
+ _headContext = _headContext.createChildArrayContext(f, false);
+ // but if we didn't figure it out yet, need to buffer possible events
+ if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) {
+ t = _nextTokenWithBuffering(_headContext);
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ }
+ continue main_loop;
+
+ case ID_START_OBJECT:
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return (_currToken = t);
+ }
+ if (f == null) { // does this occur?
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ // Otherwise still iffy, need to check
+ f = _headContext.checkValue(f);
+ if (f == null) {
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ if (f != TokenFilter.INCLUDE_ALL) {
+ f = f.filterStartObject();
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return (_currToken = t);
+ } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return (_currToken = t);
+ }
+ _headContext = _headContext.createChildObjectContext(f, false);
+ if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) {
+ t = _nextTokenWithBuffering(_headContext);
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ }
+ continue main_loop;
+
+ case ID_END_ARRAY:
+ {
+ boolean returnEnd = _headContext.isStartHandled();
+ f = _headContext.getFilter();
+ if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) {
+ boolean includeEmpty = f.includeEmptyArray(_headContext.hasCurrentIndex());
+ f.filterFinishArray();
+ if (includeEmpty) {
+ return _nextBuffered(_headContext);
+ }
+ }
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+ if (returnEnd) {
+ return (_currToken = t);
+ }
+ }
+ continue main_loop;
+ case ID_END_OBJECT:
+ {
+ boolean returnEnd = _headContext.isStartHandled();
+ f = _headContext.getFilter();
+ if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) {
+ boolean includeEmpty = f.includeEmptyObject(_headContext.hasCurrentName());
+ f.filterFinishObject();
+ if (includeEmpty) {
+ return _nextBuffered(_headContext);
+ }
+ }
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+ if (returnEnd) {
+ return (_currToken = t);
+ }
+ }
+ continue main_loop;
+
+ case ID_FIELD_NAME:
+ {
+ final String name = delegate.getCurrentName();
+ f = _headContext.setFieldName(name);
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _itemFilter = f;
+ return (_currToken = t);
+ }
+ if (f == null) { // filter out the value
+ delegate.nextToken();
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ f = f.includeProperty(name);
+ if (f == null) { // filter out the value
+ delegate.nextToken();
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ if (_verifyAllowedMatches()) {
+ if (_inclusion == Inclusion.INCLUDE_ALL_AND_PATH) {
+ return (_currToken = t);
+ }
+ } else {
+ delegate.nextToken();
+ delegate.skipChildren();
+ }
+ continue main_loop;
+ }
+ if (_inclusion != Inclusion.ONLY_INCLUDE_ALL) {
+ t = _nextTokenWithBuffering(_headContext);
+ if (t != null) {
+ _currToken = t;
+ return t;
+ }
+ }
+ }
+ continue main_loop;
+
+ default: // scalar value
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ return (_currToken = t);
+ }
+ if (f != null) {
+ f = _headContext.checkValue(f);
+ if ((f == TokenFilter.INCLUDE_ALL)
+ || ((f != null) && f.includeValue(delegate))) {
+ if (_verifyAllowedMatches()) {
+ return (_currToken = t);
+ }
+ }
+ }
+ // Otherwise not included (leaves must be explicitly included)
+ break;
+ }
+ }
+ }
+
+ // Method called when a new potentially included context is found.
+ protected final JsonToken _nextTokenWithBuffering(final TokenFilterContext buffRoot)
+ throws IOException
+ {
+ main_loop:
+ while (true) {
+ JsonToken t = delegate.nextToken();
+ if (t == null) { // is this even legal?
+ return t;
+ }
+ TokenFilter f;
+
+ // One simplification here: we know for a fact that the item filter is
+ // neither null nor 'include all', for most cases; the only exception
+ // being FIELD_NAME handling
+
+ switch (t.id()) {
+ case ID_START_ARRAY:
+ f = _headContext.checkValue(_itemFilter);
+ if (f == null) {
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ if (f != TokenFilter.INCLUDE_ALL) {
+ f = f.filterStartArray();
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return _nextBuffered(buffRoot);
+ } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) {
+ // TODO don't count as match?
+ _headContext = _headContext.createChildArrayContext(f, true);
+ return _nextBuffered(buffRoot);
+ }
+ _headContext = _headContext.createChildArrayContext(f, false);
+ continue main_loop;
+
+ case ID_START_OBJECT:
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return t;
+ }
+ if (f == null) { // does this occur?
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ // Otherwise still iffy, need to check
+ f = _headContext.checkValue(f);
+ if (f == null) {
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ if (f != TokenFilter.INCLUDE_ALL) {
+ f = f.filterStartObject();
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return _nextBuffered(buffRoot);
+ } else if (f != null && _inclusion == Inclusion.INCLUDE_NON_NULL) {
+ // TODO don't count as match?
+ _headContext = _headContext.createChildObjectContext(f, true);
+ return _nextBuffered(buffRoot);
+ }
+ _headContext = _headContext.createChildObjectContext(f, false);
+ continue main_loop;
+
+ case ID_END_ARRAY:
+ {
+ // Unlike with other loops, here we know that content was NOT
+ // included (won't get this far otherwise)
+ f = _headContext.getFilter();
+ if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) {
+ boolean includeEmpty = f.includeEmptyArray(_headContext.hasCurrentIndex());
+ f.filterFinishArray();
+ if (includeEmpty) {
+ return _nextBuffered(buffRoot);
+ }
+ }
+ boolean gotEnd = (_headContext == buffRoot);
+ boolean returnEnd = gotEnd && _headContext.isStartHandled();
+
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+
+ if (returnEnd) {
+ return t;
+ }
+ if (gotEnd) {
+ return null;
+ }
+ }
+ continue main_loop;
+ case ID_END_OBJECT:
+ {
+ // Unlike with other loops, here we know that content was NOT
+ // included (won't get this far otherwise)
+ f = _headContext.getFilter();
+ if ((f != null) && (f != TokenFilter.INCLUDE_ALL)) {
+ boolean includeEmpty = f.includeEmptyObject(_headContext.hasCurrentName());
+ f.filterFinishObject();
+ if (includeEmpty) {
+ _headContext._currentName = _headContext._parent == null
+ ? null
+ : _headContext._parent._currentName;
+ _headContext._needToHandleName = false;
+ return _nextBuffered(buffRoot);
+ }
+ }
+ boolean gotEnd = (_headContext == buffRoot);
+ boolean returnEnd = gotEnd && _headContext.isStartHandled();
+
+ _headContext = _headContext.getParent();
+ _itemFilter = _headContext.getFilter();
+
+ if (returnEnd) {
+ return t;
+ }
+ if (gotEnd) {
+ return null;
+ }
+ }
+ continue main_loop;
+
+ case ID_FIELD_NAME:
+ {
+ final String name = delegate.getCurrentName();
+ f = _headContext.setFieldName(name);
+ if (f == TokenFilter.INCLUDE_ALL) {
+ _itemFilter = f;
+ return _nextBuffered(buffRoot);
+ }
+ if (f == null) { // filter out the value
+ delegate.nextToken();
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ f = f.includeProperty(name);
+ if (f == null) { // filter out the value
+ delegate.nextToken();
+ delegate.skipChildren();
+ continue main_loop;
+ }
+ _itemFilter = f;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ if (_verifyAllowedMatches()) {
+ return _nextBuffered(buffRoot);
+ } else {
+ // edge case: if no more matches allowed, reset filter
+ // to initial state to prevent missing a token in next iteration
+ _itemFilter = _headContext.setFieldName(name);
+ }
+ }
+ }
+ continue main_loop;
+
+ default: // scalar value
+ f = _itemFilter;
+ if (f == TokenFilter.INCLUDE_ALL) {
+ return _nextBuffered(buffRoot);
+ }
+ if (f != null) {
+ f = _headContext.checkValue(f);
+ if ((f == TokenFilter.INCLUDE_ALL)
+ || ((f != null) && f.includeValue(delegate))) {
+ if (_verifyAllowedMatches()) {
+ return _nextBuffered(buffRoot);
+ }
+ }
+ }
+ // Otherwise not included (leaves must be explicitly included)
+ continue main_loop;
+ }
+ }
+ }
+
+ private JsonToken _nextBuffered(TokenFilterContext buffRoot) throws IOException
+ {
+ _exposedContext = buffRoot;
+ TokenFilterContext ctxt = buffRoot;
+ JsonToken t = ctxt.nextTokenToRead();
+ if (t != null) {
+ return t;
+ }
+ while (true) {
+ // all done with buffered stuff?
+ if (ctxt == _headContext) {
+ throw _constructError("Internal error: failed to locate expected buffered tokens");
+ /*
+ _exposedContext = null;
+ break;
+ */
+ }
+ // If not, traverse down the context chain
+ ctxt = _exposedContext.findChildOf(ctxt);
+ _exposedContext = ctxt;
+ if (ctxt == null) { // should never occur
+ throw _constructError("Unexpected problem: chain of filtered context broken");
+ }
+ t = _exposedContext.nextTokenToRead();
+ if (t != null) {
+ return t;
+ }
+ }
+ }
+
+ private final boolean _verifyAllowedMatches() throws IOException {
+ if (_matchCount == 0 || _allowMultipleMatches) {
+ ++_matchCount;
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public JsonToken nextValue() throws IOException {
+ // Re-implemented same as ParserMinimalBase:
+ JsonToken t = nextToken();
+ if (t == JsonToken.FIELD_NAME) {
+ t = nextToken();
+ }
+ return t;
+ }
+
+ /**
+ * Need to override and re-implement this similarly to the method
+ * defined in {@link com.fasterxml.jackson.core.base.ParserMinimalBase},
+ * to keep the state of this delegate correct.
+ */
+ @Override
+ public JsonParser skipChildren() throws IOException
+ {
+ if ((_currToken != JsonToken.START_OBJECT)
+ && (_currToken != JsonToken.START_ARRAY)) {
+ return this;
+ }
+ int open = 1;
+
+ // Since proper matching of start/end markers is handled
+ // by nextToken(), we'll just count nesting levels here
+ while (true) {
+ JsonToken t = nextToken();
+ if (t == null) { // not ideal but for now, just return
+ return this;
+ }
+ if (t.isStructStart()) {
+ ++open;
+ } else if (t.isStructEnd()) {
+ if (--open == 0) {
+ return this;
+ }
+ }
+ }
+ }
+
+ /*
+ /**********************************************************
+ /* Public API, access to token information, text
+ /**********************************************************
+ */
+
+ // 19-Jul-2021, tatu: Cannot quite just delegate these methods due to oddity
+ // of property name token, which may be buffered.
+
+ @Override public String getText() throws IOException {
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return currentName();
+ }
+ return delegate.getText();
+ }
+
+ @Override public boolean hasTextCharacters() {
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return false;
+ }
+ return delegate.hasTextCharacters();
+ }
+
+ @Override public char[] getTextCharacters() throws IOException {
+ // Not optimal but is correct, unlike delegating (as underlying stream
+ // may point to something else due to buffering)
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return currentName().toCharArray();
+ }
+ return delegate.getTextCharacters();
+ }
+
+ @Override public int getTextLength() throws IOException {
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return currentName().length();
+ }
+ return delegate.getTextLength();
+ }
+ @Override public int getTextOffset() throws IOException {
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return 0;
+ }
+ return delegate.getTextOffset();
+ }
+
+ /*
+ /**********************************************************
+ /* Public API, access to token information, numeric
+ /**********************************************************
+ */
+
+ @Override
+ public BigInteger getBigIntegerValue() throws IOException { return delegate.getBigIntegerValue(); }
+
+ @Override
+ public boolean getBooleanValue() throws IOException { return delegate.getBooleanValue(); }
+
+ @Override
+ public byte getByteValue() throws IOException { return delegate.getByteValue(); }
+
+ @Override
+ public short getShortValue() throws IOException { return delegate.getShortValue(); }
+
+ @Override
+ public BigDecimal getDecimalValue() throws IOException { return delegate.getDecimalValue(); }
+
+ @Override
+ public double getDoubleValue() throws IOException { return delegate.getDoubleValue(); }
+
+ @Override
+ public float getFloatValue() throws IOException { return delegate.getFloatValue(); }
+
+ @Override
+ public int getIntValue() throws IOException { return delegate.getIntValue(); }
+
+ @Override
+ public long getLongValue() throws IOException { return delegate.getLongValue(); }
+
+ @Override
+ public NumberType getNumberType() throws IOException { return delegate.getNumberType(); }
+
+ @Override
+ public Number getNumberValue() throws IOException { return delegate.getNumberValue(); }
+
+ /*
+ /**********************************************************
+ /* Public API, access to token information, coercion/conversion
+ /**********************************************************
+ */
+
+ @Override public int getValueAsInt() throws IOException { return delegate.getValueAsInt(); }
+ @Override public int getValueAsInt(int defaultValue) throws IOException { return delegate.getValueAsInt(defaultValue); }
+ @Override public long getValueAsLong() throws IOException { return delegate.getValueAsLong(); }
+ @Override public long getValueAsLong(long defaultValue) throws IOException { return delegate.getValueAsLong(defaultValue); }
+ @Override public double getValueAsDouble() throws IOException { return delegate.getValueAsDouble(); }
+ @Override public double getValueAsDouble(double defaultValue) throws IOException { return delegate.getValueAsDouble(defaultValue); }
+ @Override public boolean getValueAsBoolean() throws IOException { return delegate.getValueAsBoolean(); }
+ @Override public boolean getValueAsBoolean(boolean defaultValue) throws IOException { return delegate.getValueAsBoolean(defaultValue); }
+
+ @Override public String getValueAsString() throws IOException {
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return currentName();
+ }
+ return delegate.getValueAsString();
+ }
+ @Override public String getValueAsString(String defaultValue) throws IOException {
+ if (_currToken == JsonToken.FIELD_NAME) {
+ return currentName();
+ }
+ return delegate.getValueAsString(defaultValue);
+ }
+
+ /*
+ /**********************************************************
+ /* Public API, access to token values, other
+ /**********************************************************
+ */
+
+ @Override public Object getEmbeddedObject() throws IOException { return delegate.getEmbeddedObject(); }
+ @Override public byte[] getBinaryValue(Base64Variant b64variant) throws IOException { return delegate.getBinaryValue(b64variant); }
+ @Override public int readBinaryValue(Base64Variant b64variant, OutputStream out) throws IOException { return delegate.readBinaryValue(b64variant, out); }
+ @Override public JsonLocation getTokenLocation() { return delegate.getTokenLocation(); }
+
+ /*
+ /**********************************************************
+ /* Internal helper methods
+ /**********************************************************
+ */
+
+ protected JsonStreamContext _filterContext() {
+ if (_exposedContext != null) {
+ return _exposedContext;
+ }
+ return _headContext;
+ }
+}
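For orientation, this is how a filtering parser delegate of this shape is typically driven end to end. The sketch uses the upstream jackson-core filter classes, whose API the copy above mirrors; the JSON input and the pointer are illustrative only:

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.JsonToken;
    import com.fasterxml.jackson.core.filter.FilteringParserDelegate;
    import com.fasterxml.jackson.core.filter.JsonPointerBasedFilter;
    import com.fasterxml.jackson.core.filter.TokenFilter;

    public class FilteringDemo {
        public static void main(String[] args) throws Exception {
            String json = "{\"a\":{\"b\":1,\"c\":2},\"d\":3}";
            JsonParser base = new JsonFactory().createParser(json);
            // Expose only the subtree at /a/b, plus the path of tokens leading to it
            JsonParser filtered = new FilteringParserDelegate(
                base,
                new JsonPointerBasedFilter("/a/b"),
                TokenFilter.Inclusion.INCLUDE_ALL_AND_PATH,
                false // allowMultipleMatches: stop after the first full match
            );
            // Expected (roughly): START_OBJECT, FIELD_NAME (a), START_OBJECT,
            // FIELD_NAME (b), VALUE_NUMBER_INT, END_OBJECT, END_OBJECT
            for (JsonToken t = filtered.nextToken(); t != null; t = filtered.nextToken()) {
                System.out.println(t + (t == JsonToken.FIELD_NAME ? " (" + filtered.currentName() + ")" : ""));
            }
        }
    }

With Inclusion.INCLUDE_ALL_AND_PATH the delegate emits the tokens on the path down to the match plus the matched value and nothing else, which is the buffering behaviour that _nextTokenWithBuffering and _nextBuffered implement above.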
diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java
index 9dd5975cc1659..90555c6fed455 100644
--- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java
+++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java
@@ -334,6 +334,61 @@ private void testFilter(Builder expected, Builder sample, Set
diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java
new file mode 100644
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingListener.java
+/**
+ * A mechanism to complete a listener on the completion of some (dynamic) collection of other actions. Basic usage is as follows:
+ *
+ * <pre>
+ * try (var refs = new RefCountingListener(finalListener)) {
+ *     for (var item : collection) {
+ *         runAsyncAction(item, refs.acquire()); // completes the acquired listener on completion
+ *     }
+ * }
+ * </pre>
+ *
+ * The delegate listener is completed when execution leaves the try-with-resources block and every acquired reference is released. The
+ * {@link RefCountingListener} collects (a bounded number of) exceptions received by its subsidiary listeners, and completes the delegate
+ * listener with an exception if (and only if) any subsidiary listener fails. However, unlike a {@link GroupedActionListener} it leaves it
+ * to the caller to collect the results of successful completions by accumulating them in a data structure of its choice. Also unlike a
+ * {@link GroupedActionListener} there is no need to declare the number of subsidiary listeners up front: listeners can be acquired
+ * dynamically as needed. Finally, you can continue to acquire additional listeners even outside the try-with-resources block, perhaps in a
+ * separate thread, as long as there's at least one listener outstanding:
+ *
+ * <pre>
+ * try (var refs = new RefCountingListener(finalListener)) {
+ *     for (var item : collection) {
+ *         if (condition(item)) {
+ *             runAsyncAction(item, refs.acquire().map(results::add));
+ *         }
+ *     }
+ *     if (flag) {
+ *         runOneOffAsyncAction(refs.acquire().map(results::add));
+ *         return;
+ *     }
+ *     for (var item : otherCollection) {
+ *         var itemRef = refs.acquire(); // delays completion while the background action is pending
+ *         executorService.execute(() -> {
+ *             try {
+ *                 if (condition(item)) {
+ *                     runOtherAsyncAction(item, refs.acquire().map(results::add));
+ *                 }
+ *             } finally {
+ *                 itemRef.onResponse(null);
+ *             }
+ *         });
+ *     }
+ * }
+ * </pre>
+ *
+ * In particular (and also unlike a {@link GroupedActionListener}) this works even if you don't acquire any extra refs at all: in that case,
+ * the delegate listener is completed at the end of the try-with-resources block.
+ */
+public final class RefCountingListener implements Releasable {
+
+    private final ActionListener<Void> delegate;
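The mechanism this javadoc describes, which RefCountingRunnable below shares, reduces to a counter that starts at one on behalf of the try-with-resources block itself. The following stripped-down sketch is illustrative only, not the Elasticsearch implementation; it omits the exception collection and the misuse assertions:

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative sketch of the ref-counting idea, not the real RefCountingListener.
    class RefCountingSketch implements AutoCloseable {
        private final AtomicInteger refs = new AtomicInteger(1); // the try block holds the initial ref
        private final Runnable onCompletion;

        RefCountingSketch(Runnable onCompletion) {
            this.onCompletion = onCompletion;
        }

        // Returns an action that releases the acquired ref; may be handed to async code.
        Runnable acquire() {
            while (true) {
                int current = refs.get();
                if (current == 0) {
                    throw new IllegalStateException("already completed"); // the real class also asserts
                }
                if (refs.compareAndSet(current, current + 1)) {
                    return this::release;
                }
            }
        }

        private void release() {
            if (refs.decrementAndGet() == 0) {
                onCompletion.run(); // runs exactly once, when the last outstanding ref is released
            }
        }

        @Override
        public void close() {
            release(); // leaving the try-with-resources block releases the initial ref
        }
    }

Completion therefore happens either at the end of the try block, if no extra refs are outstanding, or on whichever thread releases the last ref, which is the behaviour the second example in the javadoc relies on.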
diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java
--- a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java
+++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java
 * try (var refs = new RefCountingRunnable(finalRunnable)) {
@@ -95,7 +95,11 @@ public RefCountingRunnable(Runnable delegate) {
      * Acquire a reference to this object and return an action which releases it. The delegate {@link Runnable} is called when all its
      * references have been released.
      *
-     * Callers must take care to close the returned resource exactly once. This deviates from the contract of {@link java.io.Closeable}.
+     * It is invalid to call this method once all references are released. Doing so will trip an assertion if assertions are enabled, and
+     * will throw an {@link IllegalStateException} otherwise.
+     *
+     * It is also invalid to release the acquired resource more than once. Doing so will trip an assertion if assertions are enabled, but
+     * will be ignored otherwise. This deviates from the contract of {@link java.io.Closeable}.
      */
     public Releasable acquire() {
         if (refCounted.tryIncRef()) {
@@ -116,7 +120,8 @@ public ActionListener<Void> acquireListener() {
     /**
      * Release the original reference to this object, which executes the delegate {@link Runnable} if there are no other references.
      *
-     * Callers must take care to close this resource exactly once. This deviates from the contract of {@link java.io.Closeable}.
+     * It is invalid to call this method more than once. Doing so will trip an assertion if assertions are enabled, but will be ignored
+     * otherwise. This deviates from the contract of {@link java.io.Closeable}.
      */
     @Override
     public void close() {
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
index 3fc69ceb7b826..0dd85d873463d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java
@@ -45,23 +45,23 @@ public class IndexShardRoutingTable {
 
     final ShardShuffler shuffler;
     final ShardId shardId;
-
+    final ShardRouting[] shards;
     final ShardRouting primary;
     final List<ShardRouting> replicas;
-    final ShardRouting[] shards;
     final List<ShardRouting> activeShards;
     final List<ShardRouting> assignedShards;
-    final boolean allShardsStarted;
-
     /**
      * The initializing list, including ones that are initializing on a target node because of relocation.
      * If we can come up with a better variable name, it would be nice...
      */
     final List<ShardRouting> allInitializingShards;
+    final boolean allShardsStarted;
+    final int activeSearchShardCount;
+    final int totalSearchShardCount;
 
     IndexShardRoutingTable(ShardId shardId, List<ShardRouting> shards) {
-        this.shardId = shardId;
         this.shuffler = new RotationShardShuffler(Randomness.get().nextInt());
+        this.shardId = shardId;
         this.shards = shards.toArray(ShardRouting[]::new);
 
         ShardRouting primary = null;
@@ -70,6 +70,8 @@ public class IndexShardRoutingTable {
         List<ShardRouting> assignedShards = new ArrayList<>();
         List<ShardRouting> allInitializingShards = new ArrayList<>();
         boolean allShardsStarted = true;
+        int activeSearchShardCount = 0;
+        int totalSearchShardCount = 0;
         for (ShardRouting shard : this.shards) {
             if (shard.primary()) {
                 assert primary == null : "duplicate primary: " + primary + " vs " + shard;
@@ -79,6 +81,12 @@ public class IndexShardRoutingTable {
             }
             if (shard.active()) {
                 activeShards.add(shard);
+                if (shard.role().isSearchable()) {
+                    activeSearchShardCount++;
+                }
+            }
+            if (shard.role().isSearchable()) {
+                totalSearchShardCount++;
             }
             if (shard.initializing()) {
                 allInitializingShards.add(shard);
@@ -97,12 +105,14 @@ public class IndexShardRoutingTable {
                 allShardsStarted = false;
             }
         }
-        this.allShardsStarted = allShardsStarted;
         this.primary = primary;
         this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas);
         this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards);
         this.assignedShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(assignedShards);
        this.allInitializingShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(allInitializingShards);
+        this.allShardsStarted = allShardsStarted;
+        this.activeSearchShardCount = activeSearchShardCount;
+        this.totalSearchShardCount = totalSearchShardCount;
     }
 
     /**
@@ -461,6 +471,24 @@ public boolean allShardsStarted() {
         return allShardsStarted;
     }
 
+    /**
+     * @return the count of active searchable shards
+     */
+    public int getActiveSearchShardCount() {
+        return activeSearchShardCount;
+    }
+
+    /**
+     * @return the total count of searchable shards
+     */
+    public int getTotalSearchShardCount() {
+        return totalSearchShardCount;
+    }
+
+    public boolean hasSearchShards() {
+        return totalSearchShardCount > 0;
+    }
+
     @Nullable
     public ShardRouting getByAllocationId(String allocationId) {
         for (ShardRouting shardRouting : assignedShards()) {
diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java
index 9c69792914247..b5d93f74f0efa 100644
--- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java
+++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.cluster.AbstractNamedDiffable;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.NamedDiff;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -96,6 +97,11 @@ public int hashCode() {
         return Objects.hash(diskMetadata);
     }
 
+    @Override
+    public String toString() {
+        return "HealthMetadata{diskMetadata=" + Strings.toString(diskMetadata) + '}';
+    }
+
     /**
      * Contains the thresholds necessary to determine the health of the disk space of a node. The thresholds are determined by the elected
      * master.
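The new HealthMetadata#toString above leans on org.elasticsearch.common.Strings.toString, which renders any ToXContent-implementing object as JSON. A hypothetical sketch of the pattern, with an invented DiskThresholds type standing in for the real disk metadata:

    import java.io.IOException;

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.xcontent.ToXContentObject;
    import org.elasticsearch.xcontent.XContentBuilder;

    // Hypothetical value type; invented for illustration, not part of this PR.
    record DiskThresholds(String highWatermark) implements ToXContentObject {

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return builder.startObject().field("high_watermark", highWatermark).endObject();
        }

        @Override
        public String toString() {
            // e.g. new DiskThresholds("90%") renders as {"high_watermark":"90%"}, the same
            // trick HealthMetadata#toString uses above via Strings.toString(diskMetadata)
            return Strings.toString(this);
        }
    }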
diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
index 50146324235a6..9d034bb3c249b 100644
--- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
+++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
@@ -160,7 +160,7 @@ private void startMonitoringIfNecessary() {
             );
             logger.debug("Local health monitoring started {}", monitoring);
         } else {
-            logger.debug("Local health monitoring already started {}, skipping", monitoring);
+            logger.trace("Local health monitoring already started {}, skipping", monitoring);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index b985281860b4e..4a25179479dcf 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -232,7 +232,6 @@ static Mapping createDynamicUpdate(DocumentParserContext context) {
             rootBuilder.addRuntimeField(runtimeField);
         }
         RootObjectMapper root = rootBuilder.build(MapperBuilderContext.root(context.mappingLookup().isSourceSynthetic()));
-        root.fixRedundantIncludes();
         return context.mappingLookup().getMapping().mappingUpdate(root);
     }
 
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java
index 8ed6bf3d1db7e..f1daf17f3ee69 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java
@@ -16,7 +16,7 @@
 /**
  * Holds context for building Mapper objects from their Builders
  */
-public final class MapperBuilderContext {
+public class MapperBuilderContext {
 
     /**
      * The root context, to be used when building a tree of mappers
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index cbbc0ec7ab288..7a96d7c8e7d4f 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -352,7 +352,6 @@ public DocumentMapper merge(String type, CompressedXContent mappingSource, Merge
 
     private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason, CompressedXContent mappingSource) {
         DocumentMapper newMapper = new DocumentMapper(documentParser, mapping, mappingSource);
-        newMapper.mapping().getRoot().fixRedundantIncludes();
         newMapper.validate(indexSettings, reason != MergeReason.MAPPING_RECOVERY);
         return newMapper;
     }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
index 45f1363fb1a36..b82c13574e14b 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
@@ -49,7 +49,21 @@ Builder includeInParent(boolean includeInParent) {
 
         @Override
         public NestedObjectMapper build(MapperBuilderContext context) {
-            return new NestedObjectMapper(name, context.buildFullName(name), buildMappers(context.createChildContext(name)), this);
+            boolean parentIncludedInRoot = this.includeInRoot.value();
+            if (context instanceof NestedMapperBuilderContext nc) {
+                // we're already inside a nested mapper, so adjust our includes
+                if (nc.parentIncludedInRoot && this.includeInParent.value()) {
+                    this.includeInRoot = Explicit.IMPLICIT_FALSE;
+                }
+            } else {
+                // this is a top-level nested mapper, so include_in_parent = include_in_root
+                parentIncludedInRoot |= this.includeInParent.value();
+                if (this.includeInParent.value()) {
+                    this.includeInRoot = Explicit.IMPLICIT_FALSE;
+                }
+            }
+            NestedMapperBuilderContext nestedContext = new NestedMapperBuilderContext(context.buildFullName(name), parentIncludedInRoot);
+            return new NestedObjectMapper(name, context.buildFullName(name), buildMappers(nestedContext), this);
         }
     }
 
@@ -89,6 +103,21 @@ protected static void parseNested(String name, Map<String, Object> node, NestedO
         }
     }
 
+    private static class NestedMapperBuilderContext extends MapperBuilderContext {
+
+        final boolean parentIncludedInRoot;
+
+        NestedMapperBuilderContext(String path, boolean parentIncludedInRoot) {
+            super(path, false);
+            this.parentIncludedInRoot = parentIncludedInRoot;
+        }
+
+        @Override
+        public MapperBuilderContext createChildContext(String name) {
+            return new NestedMapperBuilderContext(buildFullName(name), parentIncludedInRoot);
+        }
+    }
+
     private Explicit<Boolean> includeInRoot;
     private Explicit<Boolean> includeInParent;
     private final String nestedTypePath;
@@ -153,7 +182,7 @@ public ObjectMapper.Builder newBuilder(Version indexVersionCreated) {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(simpleName());
         builder.field("type", CONTENT_TYPE);
-        if (includeInParent.value()) {
+        if (includeInParent.explicit() && includeInParent.value()) {
             builder.field("include_in_parent", includeInParent.value());
         }
         if (includeInRoot.value()) {
@@ -191,10 +220,28 @@ public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, Ma
                 throw new MapperException("the [include_in_root] parameter can't be updated on a nested object mapping");
             }
         }
+        if (parentBuilderContext instanceof NestedMapperBuilderContext nc) {
+            if (nc.parentIncludedInRoot && toMerge.includeInParent.value()) {
+                toMerge.includeInRoot = Explicit.IMPLICIT_FALSE;
+            }
+        } else {
+            if (toMerge.includeInParent.value()) {
+                toMerge.includeInRoot = Explicit.IMPLICIT_FALSE;
+            }
+        }
         toMerge.doMerge(mergeWithObject, reason, parentBuilderContext);
         return toMerge;
     }
 
+    @Override
+    protected MapperBuilderContext createChildContext(MapperBuilderContext mapperBuilderContext, String name) {
+        boolean parentIncludedInRoot = this.includeInRoot.value();
+        if (mapperBuilderContext instanceof NestedMapperBuilderContext == false) {
+            parentIncludedInRoot |= this.includeInParent.value();
+        }
+        return new NestedMapperBuilderContext(mapperBuilderContext.buildFullName(name), parentIncludedInRoot);
+    }
+
     @Override
     public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() {
         throw new IllegalArgumentException("field [" + name() + "] of type [" + typeName() + "] doesn't support synthetic source");
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
index 9e5516b978ee2..96d494a44b8f6 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
@@ -120,36 +120,6 @@ public RootObjectMapper build(MapperBuilderContext context) {
         }
     }
 
-    /**
-     * Removes redundant root includes in {@link NestedObjectMapper} trees to avoid duplicate
-     * fields on the root mapper when {@code isIncludeInRoot} is {@code true} for a node that is
-     * itself included into a parent node, for which either {@code isIncludeInRoot} is
-     * {@code true} or which is transitively included in root by a chain of nodes with
-     * {@code isIncludeInParent} returning {@code true}.
-     */
-    // TODO it would be really nice to make this an implementation detail of NestedObjectMapper
-    // and run it as part of the builder, but this does not yet work because of the way that
-    // index templates are merged together. If merge() was run on Builder objects rather than
-    // on Mappers then we could move this.
-    public void fixRedundantIncludes() {
-        fixRedundantIncludes(this, true);
-    }
-
-    private static void fixRedundantIncludes(ObjectMapper objectMapper, boolean parentIncluded) {
-        for (Mapper mapper : objectMapper) {
-            if (mapper instanceof NestedObjectMapper child) {
-                boolean isNested = child.isNested();
-                boolean includeInRootViaParent = parentIncluded && isNested && child.isIncludeInParent();
-                boolean includedInRoot = isNested && child.isIncludeInRoot();
-                if (includeInRootViaParent && includedInRoot) {
-                    child.setIncludeInParent(true);
-                    child.setIncludeInRoot(false);
-                }
-                fixRedundantIncludes(child, includeInRootViaParent || includedInRoot);
-            }
-        }
-    }
-
     private Explicit<DateFormatter[]> dynamicDateTimeFormatters;
     private Explicit<Boolean> dateDetection;
     private Explicit<Boolean> numericDetection;
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 8ba8f3acde601..9dea01238a02f 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -27,10 +27,10 @@
 import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.action.SingleResultDeduplicator;
 import org.elasticsearch.action.StepListener;
-import org.elasticsearch.action.support.CountDownActionListener;
 import org.elasticsearch.action.support.GroupedActionListener;
 import org.elasticsearch.action.support.ListenableActionFuture;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.support.RefCountingListener;
 import org.elasticsearch.action.support.RefCountingRunnable;
 import org.elasticsearch.action.support.ThreadedActionListener;
 import org.elasticsearch.cluster.ClusterState;
@@ -1422,7 +1422,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte
                 indexMetaIdentifiers = null;
             }
 
-            final ActionListener<Void> allMetaListener = new CountDownActionListener(2 + indices.size(), ActionListener.wrap(v -> {
+            try (var allMetaListeners = new RefCountingListener(ActionListener.wrap(v -> {
                 final String slmPolicy = slmPolicy(snapshotInfo);
                 final SnapshotDetails snapshotDetails = new SnapshotDetails(
                     snapshotInfo.state(),
@@ -1445,52 +1445,53 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte
                     }
                 }, onUpdateFailure)
                 );
-            }, onUpdateFailure));
-
-            // We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method will
-            // mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of the
-            // index or global metadata will be compatible with the segments written in this snapshot as well.
-            // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way
-            // that decrements the generation it points at
-            final Metadata clusterMetadata = finalizeSnapshotContext.clusterMetadata();
-            // Write Global MetaData
-            executor.execute(
-                ActionRunnable.run(
-                    allMetaListener,
-                    () -> GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress)
-                )
-            );
+            }, onUpdateFailure))) {
+
+                // We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method
+                // will mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of
+                // the index or global metadata will be compatible with the segments written in this snapshot as well.
+                // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way
+                // that decrements the generation it points at
+                final Metadata clusterMetadata = finalizeSnapshotContext.clusterMetadata();
+                // Write Global MetaData
+                executor.execute(
+                    ActionRunnable.run(
+                        allMetaListeners.acquire(),
+                        () -> GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress)
+                    )
+                );
 
-            // write the index metadata for each index in the snapshot
-            for (IndexId index : indices) {
-                executor.execute(ActionRunnable.run(allMetaListener, () -> {
-                    final IndexMetadata indexMetaData = clusterMetadata.index(index.getName());
-                    if (writeIndexGens) {
-                        final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData);
-                        String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers);
-                        if (metaUUID == null) {
-                            // We don't yet have this version of the metadata so we write it
-                            metaUUID = UUIDs.base64UUID();
-                            INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress);
-                            indexMetaIdentifiers.put(identifiers, metaUUID);
+                // write the index metadata for each index in the snapshot
+                for (IndexId index : indices) {
+                    executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> {
+                        final IndexMetadata indexMetaData = clusterMetadata.index(index.getName());
+                        if (writeIndexGens) {
+                            final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData);
+                            String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers);
+                            if (metaUUID == null) {
+                                // We don't yet have this version of the metadata so we write it
+                                metaUUID = UUIDs.base64UUID();
+                                INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress);
+                                indexMetaIdentifiers.put(identifiers, metaUUID);
+                            }
+                            indexMetas.put(index, identifiers);
+                        } else {
+                            INDEX_METADATA_FORMAT.write(
+                                clusterMetadata.index(index.getName()),
+                                indexContainer(index),
+                                snapshotId.getUUID(),
+                                compress
+                            );
                         }
-                        indexMetas.put(index, identifiers);
-                    } else {
-                        INDEX_METADATA_FORMAT.write(
-                            clusterMetadata.index(index.getName()),
-                            indexContainer(index),
-                            snapshotId.getUUID(),
-                            compress
-                        );
-                    }
-                }));
+                    }));
+                }
+                executor.execute(
+                    ActionRunnable.run(
+                        allMetaListeners.acquire(),
+                        () -> SNAPSHOT_FORMAT.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), compress)
+                    )
+                );
             }
-            executor.execute(
-                ActionRunnable.run(
-                    allMetaListener,
-                    () -> SNAPSHOT_FORMAT.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), compress)
-                )
-            );
         }, onUpdateFailure);
     }
a/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java b/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java index e82e1d3fac5fc..aab2e13999513 100644 --- a/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/ActiveShardCountTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -102,6 +103,73 @@ public void testEnoughShardsActiveLevelDefault() { runTestForOneActiveShard(ActiveShardCount.DEFAULT); } + public void testEnoughShardsActiveLevelDefaultWithSearchOnlyRole() { + final String indexName = "test-idx"; + final int numberOfShards = randomIntBetween(1, 5); + final int numberOfReplicas = randomIntBetween(4, 7); + final ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + ClusterState clusterState = initializeWithNewIndex(indexName, numberOfShards, numberOfReplicas, createCustomRoleStrategy(1)); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startPrimaries(clusterState, indexName); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startLessThanWaitOnShards(clusterState, indexName, 1); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startAllShards(clusterState, indexName); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + } + + public void testEnoughShardsActiveCustomLevelWithSearchOnlyRole() { + final String indexName = "test-idx"; + final int numberOfShards = randomIntBetween(1, 5); + final int numberOfReplicas = randomIntBetween(4, 7); + final int activeShardCount = randomIntBetween(2, numberOfReplicas); + final ActiveShardCount waitForActiveShards = ActiveShardCount.from(activeShardCount); + ClusterState clusterState = initializeWithNewIndex(indexName, numberOfShards, numberOfReplicas, createCustomRoleStrategy(1)); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startPrimaries(clusterState, indexName); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startLessThanWaitOnShards(clusterState, indexName, activeShardCount - 2); + assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startWaitOnShards(clusterState, indexName, activeShardCount); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + clusterState = startAllShards(clusterState, indexName); + assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName)); + } + + public void testEnoughShardsActiveWithNoSearchOnlyRoles() { + final String indexName = "test-idx"; + final int numberOfShards = randomIntBetween(1, 5); + final int numberOfReplicas = randomIntBetween(4, 7); + final ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; + ClusterState clusterState = initializeWithNewIndex( + indexName, + numberOfShards, + numberOfReplicas, + createCustomRoleStrategy(numberOfReplicas + 1) + ); + 
+        assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName));
+        clusterState = startPrimaries(clusterState, indexName);
+        assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName));
+        clusterState = startLessThanWaitOnShards(clusterState, indexName, 1);
+        assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName));
+        clusterState = startAllShards(clusterState, indexName);
+        assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName));
+    }
+
+    private static ShardRoutingRoleStrategy createCustomRoleStrategy(int indexShardCount) {
+        return new ShardRoutingRoleStrategy() {
+            @Override
+            public ShardRouting.Role newEmptyRole(int copyIndex) {
+                return copyIndex < indexShardCount ? ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.SEARCH_ONLY;
+            }
+
+            @Override
+            public ShardRouting.Role newReplicaRole() {
+                return ShardRouting.Role.SEARCH_ONLY;
+            }
+        };
+    }
+
     public void testEnoughShardsActiveRandom() {
         final String indexName = "test-idx";
         final int numberOfShards = randomIntBetween(1, 5);
@@ -166,12 +234,11 @@ public void testEnoughShardsActiveWithClosedIndex() {
         }
     }
 
-    private void runTestForOneActiveShard(final ActiveShardCount activeShardCount) {
+    private void runTestForOneActiveShard(final ActiveShardCount waitForActiveShards) {
         final String indexName = "test-idx";
         final int numberOfShards = randomIntBetween(1, 5);
         final int numberOfReplicas = randomIntBetween(4, 7);
-        assert activeShardCount == ActiveShardCount.ONE || activeShardCount == ActiveShardCount.DEFAULT;
-        final ActiveShardCount waitForActiveShards = activeShardCount;
+        assert waitForActiveShards == ActiveShardCount.ONE || waitForActiveShards == ActiveShardCount.DEFAULT;
         ClusterState clusterState = initializeWithNewIndex(indexName, numberOfShards, numberOfReplicas);
         assertFalse(waitForActiveShards.enoughShardsActive(clusterState, indexName));
         clusterState = startPrimaries(clusterState, indexName);
@@ -180,7 +247,11 @@ private void runTestForOneActiveShard(final ActiveShardCount activeShardCount) {
         assertTrue(waitForActiveShards.enoughShardsActive(clusterState, indexName));
     }
 
-    private ClusterState initializeWithNewIndex(final String indexName, final int numShards, final int numReplicas) {
+    private ClusterState initializeWithNewIndex(String indexName, int numShards, int numReplicas) {
+        return initializeWithNewIndex(indexName, numShards, numReplicas, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY);
+    }
+
+    private ClusterState initializeWithNewIndex(String indexName, int numShards, int numReplicas, ShardRoutingRoleStrategy strategy) {
         // initial index creation and new routing table info
         final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
             .settings(settings(Version.CURRENT).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()))
@@ -188,9 +259,7 @@ private ClusterState initializeWithNewIndex(final String indexName, final int nu
             .numberOfReplicas(numReplicas)
             .build();
         final Metadata metadata = Metadata.builder().put(indexMetadata, true).build();
-        final RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
-            .addAsNew(indexMetadata)
-            .build();
+        final RoutingTable routingTable = RoutingTable.builder(strategy).addAsNew(indexMetadata).build();
         return ClusterState.builder(new ClusterName("test_cluster")).metadata(metadata).routingTable(routingTable).build();
     }
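The three new tests above drive `ActiveShardCount` through routing tables whose shard copies carry explicit roles: with `createCustomRoleStrategy(1)` every copy except the first is `SEARCH_ONLY`, and the assertions suggest that an active indexing primary alone no longer satisfies the default `wait_for_active_shards` once search-only copies exist, whereas `createCustomRoleStrategy(numberOfReplicas + 1)` (no search-only copies) behaves as before. For reference, the anonymous strategy could equally be written as a small named class; the name `IndexThenSearchRoleStrategy` is illustrative, not part of the patch:

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy;

// Named restatement of createCustomRoleStrategy(...) above: the first
// indexShardCount copies of each shard do the indexing, and every other copy
// (including replicas added later) only serves searches.
record IndexThenSearchRoleStrategy(int indexShardCount) implements ShardRoutingRoleStrategy {
    @Override
    public ShardRouting.Role newEmptyRole(int copyIndex) {
        return copyIndex < indexShardCount ? ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.SEARCH_ONLY;
    }

    @Override
    public ShardRouting.Role newReplicaRole() {
        return ShardRouting.Role.SEARCH_ONLY;
    }
}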
diff --git a/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java b/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java
new file mode 100644
index 0000000000000..6b899748438a5
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/support/RefCountingListenerTests.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.util.concurrent.EsExecutors.DIRECT_EXECUTOR_SERVICE;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class RefCountingListenerTests extends ESTestCase {
+
+    public void testBasicOperation() throws InterruptedException {
+        final var executed = new AtomicBoolean();
+        final var exceptionCount = new AtomicInteger();
+        final var threads = new Thread[between(0, 3)];
+        final var exceptionLimit = Math.max(1, between(0, threads.length));
+
+        boolean async = false;
+        final var startLatch = new CountDownLatch(1);
+
+        try (var refs = new RefCountingListener(exceptionLimit, new ActionListener<>() {
+            @Override
+            public void onResponse(Void unused) {
+                assertTrue(executed.compareAndSet(false, true));
+                assertEquals(0, exceptionCount.get());
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertTrue(executed.compareAndSet(false, true));
+                assertThat(exceptionCount.get(), greaterThan(0));
+                Throwable[] suppressed = e.getSuppressed();
+                if (exceptionCount.get() > exceptionLimit) {
+                    assertEquals(exceptionLimit, suppressed.length);
+                    for (int i = 0; i < suppressed.length; i++) {
+                        Throwable throwable = suppressed[i];
+                        if (i == suppressed.length - 1) {
+                            assertThat(
+                                throwable.getMessage(),
+                                equalTo((exceptionCount.get() - exceptionLimit) + " further exceptions were dropped")
+                            );
+                        } else {
+                            assertThat(throwable.getMessage(), equalTo("simulated"));
+                        }
+                    }
+                } else {
+                    assertEquals(exceptionCount.get() - 1, suppressed.length);
+                    for (Throwable throwable : suppressed) {
+                        assertThat(throwable.getMessage(), equalTo("simulated"));
+                    }
+                }
+            }
+
+            @Override
+            public String toString() {
+                return "test listener";
+            }
+        })) {
+            assertEquals("refCounting[test listener]", refs.toString());
+            var listener = refs.acquire();
+            assertThat(listener.toString(), containsString("refCounting[test listener]"));
+            listener.onResponse(null);
+
+            for (int i = 0; i < threads.length; i++) {
+                if (randomBoolean()) {
+                    async = true;
+                    var ref = refs.acquire();
+                    threads[i] = new Thread(() -> {
+                        try {
+                            assertTrue(startLatch.await(10, TimeUnit.SECONDS));
+                        } catch (InterruptedException e) {
+                            throw new AssertionError(e);
+                        }
+                        assertFalse(executed.get());
+                        if (randomBoolean()) {
+                            ref.onResponse(null);
+                        } else {
+                            exceptionCount.incrementAndGet();
+                            ref.onFailure(new ElasticsearchException("simulated"));
+                        }
+                    });
+                }
+            }
+
+            assertFalse(executed.get());
+        }
+
+        assertNotEquals(async, executed.get());
+
+        for (Thread thread : threads) {
+            if (thread != null) {
+                thread.start();
+            }
+        }
+
+        startLatch.countDown();
+
+        for (Thread thread : threads) {
+            if (thread != null) {
+                thread.join();
+            }
+        }
+
+        assertTrue(executed.get());
+    }
+
+    @SuppressWarnings("resource")
+    public void testNullCheck() {
+        expectThrows(NullPointerException.class, () -> new RefCountingListener(between(1, 10), null));
+    }
+
+    public void testValidation() {
+        final var callCount = new AtomicInteger();
+        final var refs = new RefCountingListener(Integer.MAX_VALUE, ActionListener.wrap(callCount::incrementAndGet));
+        refs.close();
+        assertEquals(1, callCount.get());
+
+        for (int i = between(1, 5); i > 0; i--) {
+            final ThrowingRunnable throwingRunnable;
+            final String expectedMessage;
+            if (randomBoolean()) {
+                throwingRunnable = refs::acquire;
+                expectedMessage = RefCountingRunnable.ALREADY_CLOSED_MESSAGE;
+            } else {
+                throwingRunnable = refs::close;
+                expectedMessage = "already closed";
+            }
+
+            assertEquals(expectedMessage, expectThrows(AssertionError.class, throwingRunnable).getMessage());
+            assertEquals(1, callCount.get());
+        }
+    }
+
+    public void testJavaDocExample() {
+        final var flag = new AtomicBoolean();
+        runExample(ActionListener.wrap(() -> assertTrue(flag.compareAndSet(false, true))));
+        assertTrue(flag.get());
+    }
+
+    private void runExample(ActionListener<Void> finalListener) {
+        final var collection = randomList(10, Object::new);
+        final var otherCollection = randomList(10, Object::new);
+        final var flag = randomBoolean();
+        @SuppressWarnings("UnnecessaryLocalVariable")
+        final var executorService = DIRECT_EXECUTOR_SERVICE;
+        final var results = new ArrayList<>();
+
+        try (var refs = new RefCountingListener(finalListener)) {
+            for (var item : collection) {
+                if (condition(item)) {
+                    runAsyncAction(item, refs.acquire().map(results::add));
+                }
+            }
+            if (flag) {
+                runOneOffAsyncAction(refs.acquire().map(results::add));
+                return;
+            }
+            for (var item : otherCollection) {
+                var itemRef = refs.acquire(); // delays completion while the background action is pending
+                executorService.execute(() -> {
+                    try {
+                        if (condition(item)) {
+                            runOtherAsyncAction(item, refs.acquire().map(results::add));
+                        }
+                    } finally {
+                        itemRef.onResponse(null);
+                    }
+                });
+            }
+        }
+    }
+
+    @SuppressWarnings("unused")
+    private boolean condition(Object item) {
+        return randomBoolean();
+    }
+
+    @SuppressWarnings("unused")
+    private void runAsyncAction(Object item, ActionListener<Void> listener) {
+        listener.onResponse(null);
+    }
+
+    @SuppressWarnings("unused")
+    private void runOtherAsyncAction(Object item, ActionListener<Void> listener) {
+        listener.onResponse(null);
+    }
+
+    private void runOneOffAsyncAction(ActionListener<Void> listener) {
+        listener.onResponse(null);
+    }
+}
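`testBasicOperation` above also pins down how a `RefCountingListener` constructed with an exception limit reports failures: the delegate's `onFailure` receives one primary exception, later failures are attached as suppressed exceptions up to the limit, and anything beyond the limit is summarised in a final suppressed entry. A standalone sketch of that behaviour as implied by the assertions; the failure messages and printed strings are illustrative:

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.RefCountingListener;

// Sketch: five failures against an exception limit of two. Per the test above,
// the delegate should see one primary exception, one verbatim suppressed
// exception, and a final suppressed "3 further exceptions were dropped" entry.
public class ExceptionLimitSketch {
    public static void main(String[] args) {
        var delegate = ActionListener.<Void>wrap(
            ignored -> System.out.println("all succeeded"),
            e -> {
                System.out.println("primary: " + e.getMessage());
                for (Throwable t : e.getSuppressed()) {
                    System.out.println("suppressed: " + t.getMessage());
                }
            }
        );
        try (var refs = new RefCountingListener(2 /* exception limit */, delegate)) {
            for (int i = 0; i < 5; i++) {
                refs.acquire().onFailure(new ElasticsearchException("failure #" + i));
            }
        } // the delegate fires here, once all acquired listeners have completed
    }
}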
"properties" : { + "field" : { + "type" : "nested", + "include_in_root" : true, + "properties" : { + "text" : { "type" : "text" } + } + } + }}} + """); + merge(mapperService, MergeReason.INDEX_TEMPLATE, """ + { "_doc" : { "properties" : { + "field" : { + "type" : "nested", + "include_in_parent" : true, + "properties" : { + "text" : { "type" : "text" } + } + } + }}} + """); + assertThat(Strings.toString(mapperService.documentMapper().mapping()), containsString(""" + {"type":"nested","include_in_parent":true,"properties":{""")); + } + { + MapperService mapperService = createMapperService(""" + { "_doc" : { "properties" : { + "field" : { + "type" : "nested", + "include_in_parent" : true, + "properties" : { + "text" : { "type" : "text" } + } + } + }}} + """); + merge(mapperService, MergeReason.INDEX_TEMPLATE, """ + { "_doc" : { "properties" : { + "field" : { + "type" : "nested", + "include_in_root" : true, + "properties" : { + "text" : { "type" : "text" } + } + } + }}} + """); + assertThat(Strings.toString(mapperService.documentMapper().mapping()), containsString(""" + {"type":"nested","include_in_parent":true,"properties":{""")); + } + } + + public void testMergeNested() { + NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(true) + .includeInRoot(true) + .build(MapperBuilderContext.root(false)); + NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(false) + .includeInRoot(true) + .build(MapperBuilderContext.root(false)); + + MapperException e = expectThrows(MapperException.class, () -> firstMapper.merge(secondMapper, MapperBuilderContext.root(false))); + assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping")); + + NestedObjectMapper result = (NestedObjectMapper) firstMapper.merge( + secondMapper, + MapperService.MergeReason.INDEX_TEMPLATE, + MapperBuilderContext.root(false) + ); + assertFalse(result.isIncludeInParent()); + assertTrue(result.isIncludeInRoot()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 089e1692a1a01..5749ce4650b1c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -13,8 +13,6 @@ import java.util.Collections; -import static org.hamcrest.Matchers.containsString; - public class ObjectMapperMergeTests extends ESTestCase { private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false); @@ -117,26 +115,6 @@ public void testMergeDisabledRootMapper() { assertEquals("test", merged.runtimeFields().iterator().next().name()); } - public void testMergeNested() { - NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(true) - .includeInRoot(true) - .build(MapperBuilderContext.root(false)); - NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(false) - .includeInRoot(true) - .build(MapperBuilderContext.root(false)); - - MapperException e = expectThrows(MapperException.class, () -> firstMapper.merge(secondMapper, MapperBuilderContext.root(false))); - assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping")); - - NestedObjectMapper result = 
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java
index 089e1692a1a01..5749ce4650b1c 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java
@@ -13,8 +13,6 @@
 
 import java.util.Collections;
 
-import static org.hamcrest.Matchers.containsString;
-
 public class ObjectMapperMergeTests extends ESTestCase {
 
     private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false);
@@ -117,26 +115,6 @@ public void testMergeDisabledRootMapper() {
         assertEquals("test", merged.runtimeFields().iterator().next().name());
     }
 
-    public void testMergeNested() {
-        NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(true)
-            .includeInRoot(true)
-            .build(MapperBuilderContext.root(false));
-        NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", Version.CURRENT).includeInParent(false)
-            .includeInRoot(true)
-            .build(MapperBuilderContext.root(false));
-
-        MapperException e = expectThrows(MapperException.class, () -> firstMapper.merge(secondMapper, MapperBuilderContext.root(false)));
-        assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping"));
-
-        NestedObjectMapper result = (NestedObjectMapper) firstMapper.merge(
-            secondMapper,
-            MapperService.MergeReason.INDEX_TEMPLATE,
-            MapperBuilderContext.root(false)
-        );
-        assertFalse(result.isIncludeInParent());
-        assertTrue(result.isIncludeInRoot());
-    }
-
     public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() {
         RootObjectMapper mergeInto = createRootSubobjectFalseLeafWithDots();
         RootObjectMapper mergeWith = createRootSubobjectFalseLeafWithDots();
diff --git a/settings.gradle b/settings.gradle
index a157ba4784353..d98f65042e447 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -136,6 +136,10 @@ project(":libs").children.each { libsProject ->
   }
 }
 
+project(":qa:stable-api").children.each { libsProject ->
+  libsProject.name = "elasticsearch-${libsProject.name}"
+}
+
 project(":test:external-modules").children.each { testProject ->
   testProject.name = "test-${testProject.name}"
 }
diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc
index ed30fc6c14550..23748172c92de 100644
--- a/x-pack/docs/en/security/troubleshooting.asciidoc
+++ b/x-pack/docs/en/security/troubleshooting.asciidoc
@@ -107,7 +107,20 @@ The role definition might be missing or invalid.
 
 |======================
 
-To help track down these possibilities, add the following lines to the end of
+To help track down these possibilities, enable additional logging to troubleshoot further.
+You can enable debug logging by configuring the following persistent setting:
+
+[source, console]
+----
+PUT /_cluster/settings
+{
+  "persistent": {
+    "logger.org.elasticsearch.xpack.security.authc": "debug"
+  }
+}
+----
+
+Alternatively, you can add the following lines to the end of
 the `log4j2.properties` configuration file in the `ES_PATH_CONF`:
 
 [source,properties]
@@ -116,6 +129,9 @@ logger.authc.name = org.elasticsearch.xpack.security.authc
 logger.authc.level = DEBUG
 ----------------
 
+Refer to < > for more
+information.
+
 A successful authentication should produce debug statements that list groups
 and role mappings.
 --
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java
index 8acabd1a8c204..73d2ed5efd41d 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java
@@ -9,7 +9,7 @@
 
 import org.elasticsearch.Assertions;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.support.GroupedActionListener;
+import org.elasticsearch.action.support.RefCountingListener;
 import org.elasticsearch.core.Nullable;
 
 import java.util.ArrayList;
@@ -243,30 +243,7 @@ public List<Gap> waitForRange(final ByteRange range, final ByteRange subRange, f
             .collect(Collectors.toList());
         }
 
-        // NB we work with ranges outside the mutex here, but only to interact with their completion listeners which are `final` so
-        // there is no risk of concurrent modification.
-
-        switch (requiredRanges.size()) {
-            case 0 ->
-                // no need to wait for the gaps to be filled, the listener can be executed immediately
-                wrappedListener.onResponse(null);
-            case 1 -> {
-                final Range requiredRange = requiredRanges.get(0);
-                requiredRange.completionListener.addListener(
-                    wrappedListener.map(progress -> null),
-                    Math.min(requiredRange.completionListener.end, subRange.end())
-                );
-            }
-            default -> {
-                final GroupedActionListener<Long> groupedActionListener = new GroupedActionListener<>(
-                    requiredRanges.size(),
-                    wrappedListener.map(progress -> null)
-                );
-                requiredRanges.forEach(
-                    r -> r.completionListener.addListener(groupedActionListener, Math.min(r.completionListener.end, subRange.end()))
-                );
-            }
-        }
+        subscribeToCompletionListeners(requiredRanges, subRange.end(), wrappedListener);
 
         return Collections.unmodifiableList(gaps);
     }
@@ -332,31 +309,32 @@ public boolean waitForRangeIfPending(final ByteRange range, final ActionListener
             assert invariant();
         }
 
+        subscribeToCompletionListeners(pendingRanges, range.end(), wrappedListener);
+        return true;
+    }
+
+    private void subscribeToCompletionListeners(List<Range> requiredRanges, long rangeEnd, ActionListener<Void> listener) {
         // NB we work with ranges outside the mutex here, but only to interact with their completion listeners which are `final` so
         // there is no risk of concurrent modification.
-
-        switch (pendingRanges.size()) {
-            case 0 -> wrappedListener.onResponse(null);
+        switch (requiredRanges.size()) {
+            case 0 ->
+                // no need to wait for the gaps to be filled, the listener can be executed immediately
+                listener.onResponse(null);
             case 1 -> {
-                final Range pendingRange = pendingRanges.get(0);
-                pendingRange.completionListener.addListener(
-                    wrappedListener.map(progress -> null),
-                    Math.min(pendingRange.completionListener.end, range.end())
+                final Range requiredRange = requiredRanges.get(0);
+                requiredRange.completionListener.addListener(
+                    listener.map(progress -> null),
+                    Math.min(requiredRange.completionListener.end, rangeEnd)
                 );
-                return true;
             }
             default -> {
-                final GroupedActionListener<Long> groupedActionListener = new GroupedActionListener<>(
-                    pendingRanges.size(),
-                    wrappedListener.map(progress -> null)
-                );
-                pendingRanges.forEach(
-                    r -> r.completionListener.addListener(groupedActionListener, Math.min(r.completionListener.end, range.end()))
-                );
-                return true;
+                try (var listeners = new RefCountingListener(listener)) {
+                    for (Range range : requiredRanges) {
+                        range.completionListener.addListener(listeners.acquire(), Math.min(range.completionListener.end, rangeEnd));
+                    }
+                }
             }
         }
-        return true;
     }
 
     private ActionListener<Void> wrapWithAssertions(ActionListener<Void> listener) {
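The extracted `subscribeToCompletionListeners` keeps the zero- and single-source fast paths and only allocates a `RefCountingListener` when several completion sources must be awaited, replacing the `GroupedActionListener` that had to be sized to `requiredRanges.size()` up front. A condensed sketch of that shape; `CompletionSource` is a stand-in for the ranges' completion listeners, not SparseFileTracker API:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.RefCountingListener;

import java.util.List;

// Sketch: complete `listener` once all sources have completed, without
// allocating any bookkeeping in the common zero- and one-source cases.
final class SubscribeSketch {
    interface CompletionSource {
        void addListener(ActionListener<Void> listener);
    }

    static void subscribeTo(List<CompletionSource> sources, ActionListener<Void> listener) {
        switch (sources.size()) {
            case 0 -> listener.onResponse(null);            // nothing to wait for: complete inline
            case 1 -> sources.get(0).addListener(listener); // chain through directly
            default -> {
                try (var refs = new RefCountingListener(listener)) {
                    for (CompletionSource source : sources) {
                        source.addListener(refs.acquire()); // one child reference per source
                    }
                }
            }
        }
    }
}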
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java
index 416af1d64d328..5ef6a2d2c97b9 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java
@@ -109,6 +109,7 @@ public void clusterChanged(ClusterChangedEvent event) {
         return new Tuple<>(savedClusterState, metadataVersion);
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/93048")
     public void testReservedStatePersistsOnRestart() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);