Merge branch 'xdcr' into engine-factory-provider
* xdcr:
  Maybe die before trying to log cause
  Log cause when a write and flush fails
  Die if write listener fails due to fatal error
  RecoveryIT.testHistoryUUIDIsGenerated should reduce unassigned shards delay instead of ensure green.
  Replace group map settings with affix setting (elastic#26819)
  Fix references to vm.max_map_count in Docker docs
  Add more instructions about jar hell (elastic#26837)
  Forbid negative values for index.unassigned.node_left.delayed_timeout (elastic#26828)
  Nitpicking typos in comments (elastic#26831)
  MetaData Builder doesn't properly prevent an alias with the same name as an index (elastic#26804)
jasontedor committed Oct 1, 2017
2 parents 6e2f3ec + 5869a74 commit cdf80a8
Showing 29 changed files with 366 additions and 146 deletions.
13 changes: 10 additions & 3 deletions CONTRIBUTING.md
@@ -106,9 +106,16 @@ then `File->New Project From Existing Sources`. Point to the root of
 the source directory, select
 `Import project from external model->Gradle`, enable
 `Use auto-import`. Additionally, in order to run tests directly from
-IDEA 2017.2 and above it is required to disable IDEA run launcher,
-which can be achieved by adding `-Didea.no.launcher=true`
-[JVM option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties)
+IDEA 2017.2 and above it is required to disable IDEA run launcher to avoid
+finding yourself in "jar hell", which can be achieved by adding the
+`-Didea.no.launcher=true` [JVM
+option](https://intellij-support.jetbrains.com/hc/en-us/articles/206544869-Configuring-JVM-options-and-platform-properties)
+or by adding `idea.no.launcher=true` to the
+[`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
+file which can be accessed under Help > Edit Custom Properties within IDEA. You
+may also need to [remove `ant-javafx.jar` from your
+classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
+reported as a source of jar hell.
 
 The Elasticsearch codebase makes heavy use of Java `assert`s and the
 test runner requires that assertions be enabled within the JVM. This
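
For reference, the second option above amounts to one line in `idea.properties`; a minimal sketch (the property name and the Help > Edit Custom Properties location are taken from the instructions above):

    # idea.properties (opened via Help > Edit Custom Properties inside IDEA)
    # Disable the IDEA run launcher so the test runner does not trip over jar hell.
    idea.no.launcher=true
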
core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -240,14 +240,18 @@ static Setting<Integer> buildNumberOfShardsSetting() {
 public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require";
 public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = "index.routing.allocation.include";
 public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = "index.routing.allocation.exclude";
-public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
-    Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
-public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
-    Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
-public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
-    Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
-public static final Setting<Settings> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =
-    Setting.groupSetting("index.routing.allocation.initial_recovery."); // this is only setable internally not a registered setting!!
+public static final Setting.AffixSetting<String> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
+    Setting.prefixKeySetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", (key) ->
+        Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope));
+public static final Setting.AffixSetting<String> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
+    Setting.prefixKeySetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", (key) ->
+        Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope));
+public static final Setting.AffixSetting<String> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
+    Setting.prefixKeySetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", (key) ->
+        Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope));
+public static final Setting.AffixSetting<String> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =
+    Setting.prefixKeySetting("index.routing.allocation.initial_recovery.", key -> Setting.simpleString(key));
+    // this is only setable internally not a registered setting!!
 
 /**
  * The number of active shard copies to check for before proceeding with a write operation.
@@ -1012,28 +1016,28 @@ public IndexMetaData build() {
         filledInSyncAllocationIds.put(i, Collections.emptySet());
     }
 }
-final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
+final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings);
 final DiscoveryNodeFilters requireFilters;
 if (requireMap.isEmpty()) {
     requireFilters = null;
 } else {
     requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
 }
-Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
+Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings);
 final DiscoveryNodeFilters includeFilters;
 if (includeMap.isEmpty()) {
     includeFilters = null;
 } else {
     includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
 }
-Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
+Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings);
 final DiscoveryNodeFilters excludeFilters;
 if (excludeMap.isEmpty()) {
     excludeFilters = null;
 } else {
     excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
 }
-Map<String, String> initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.get(settings).getAsMap();
+Map<String, String> initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getAsMap(settings);
 final DiscoveryNodeFilters initialRecoveryFilters;
 if (initialRecoveryMap.isEmpty()) {
     initialRecoveryFilters = null;
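
To make the group-to-affix change concrete, here is a hedged sketch of the new setting from calling code. The constant and its getAsMap method appear in the hunks above; the attribute keys and values are invented, and the suffix-keyed shape of the returned map is an assumption inferred from how the map is handed to DiscoveryNodeFilters.buildFromKeyValue:

    import java.util.Map;

    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.common.settings.Settings;

    public class AffixRoutingSettingSketch {
        public static void main(String[] args) {
            // Invented attribute names: with a prefix key setting, any suffix under
            // "index.routing.allocation.require." is a concrete setting of its own.
            Settings settings = Settings.builder()
                    .put("index.routing.allocation.require.zone", "us-east-1")
                    .put("index.routing.allocation.require.box_type", "hot")
                    .build();

            // Gathers every key under the prefix into one map, keyed by the bare
            // suffix (assumption): {zone=us-east-1, box_type=hot}
            Map<String, String> requireMap =
                    IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings);
            System.out.println(requireMap);
        }
    }
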
83 changes: 50 additions & 33 deletions core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -22,7 +22,6 @@
 import com.carrotsearch.hppc.ObjectHashSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.Diff;
@@ -33,6 +32,7 @@
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -62,9 +62,11 @@
 import java.util.Comparator;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
@@ -914,55 +916,70 @@ public MetaData build() {
 // while these datastructures aren't even used.
 // 2) The aliasAndIndexLookup can be updated instead of rebuilding it all the time.
 
-// build all concrete indices arrays:
-// TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices.
-// When doing an operation across all indices, most of the time is spent on actually going to all shards and
-// do the required operations, the bottleneck isn't resolving expressions into concrete indices.
-List<String> allIndicesLst = new ArrayList<>();
+final Set<String> allIndices = new HashSet<>(indices.size());
+final List<String> allOpenIndices = new ArrayList<>();
+final List<String> allClosedIndices = new ArrayList<>();
+final Set<String> duplicateAliasesIndices = new HashSet<>();
 for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
-    allIndicesLst.add(cursor.value.getIndex().getName());
-}
-String[] allIndices = allIndicesLst.toArray(new String[allIndicesLst.size()]);
-
-List<String> allOpenIndicesLst = new ArrayList<>();
-List<String> allClosedIndicesLst = new ArrayList<>();
-for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
-    IndexMetaData indexMetaData = cursor.value;
+    final IndexMetaData indexMetaData = cursor.value;
+    final String name = indexMetaData.getIndex().getName();
+    boolean added = allIndices.add(name);
+    assert added : "double index named [" + name + "]";
     if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
-        allOpenIndicesLst.add(indexMetaData.getIndex().getName());
+        allOpenIndices.add(indexMetaData.getIndex().getName());
     } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
-        allClosedIndicesLst.add(indexMetaData.getIndex().getName());
+        allClosedIndices.add(indexMetaData.getIndex().getName());
     }
+    indexMetaData.getAliases().keysIt().forEachRemaining(duplicateAliasesIndices::add);
 }
+duplicateAliasesIndices.retainAll(allIndices);
+if (duplicateAliasesIndices.isEmpty() == false) {
+    // iterate again and construct a helpful message
+    ArrayList<String> duplicates = new ArrayList<>();
+    for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
+        for (String alias : duplicateAliasesIndices) {
+            if (cursor.value.getAliases().containsKey(alias)) {
+                duplicates.add(alias + " (alias of " + cursor.value.getIndex() + ")");
+            }
+        }
+    }
+    assert duplicates.size() > 0;
+    throw new IllegalStateException("index and alias names need to be unique, but the following duplicates were found ["
+        + Strings.collectionToCommaDelimitedString(duplicates) + "]");
+}
-String[] allOpenIndices = allOpenIndicesLst.toArray(new String[allOpenIndicesLst.size()]);
-String[] allClosedIndices = allClosedIndicesLst.toArray(new String[allClosedIndicesLst.size()]);
 
 // build all indices map
 SortedMap<String, AliasOrIndex> aliasAndIndexLookup = new TreeMap<>();
 for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
     IndexMetaData indexMetaData = cursor.value;
-    aliasAndIndexLookup.put(indexMetaData.getIndex().getName(), new AliasOrIndex.Index(indexMetaData));
+    AliasOrIndex existing = aliasAndIndexLookup.put(indexMetaData.getIndex().getName(), new AliasOrIndex.Index(indexMetaData));
+    assert existing == null : "duplicate for " + indexMetaData.getIndex();
 
     for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
         AliasMetaData aliasMetaData = aliasCursor.value;
-        AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
-        if (aliasOrIndex == null) {
-            aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
-            aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
-        } else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
-            AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
-            alias.addIndex(indexMetaData);
-        } else if (aliasOrIndex instanceof AliasOrIndex.Index) {
-            AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
-            throw new IllegalStateException("index and alias names need to be unique, but alias [" + aliasMetaData.getAlias() + "] and index " + index.getIndex().getIndex() + " have the same name");
-        } else {
-            throw new IllegalStateException("unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
-        }
+        aliasAndIndexLookup.compute(aliasMetaData.getAlias(), (aliasName, alias) -> {
+            if (alias == null) {
+                return new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
+            } else {
+                assert alias instanceof AliasOrIndex.Alias : alias.getClass().getName();
+                ((AliasOrIndex.Alias) alias).addIndex(indexMetaData);
+                return alias;
+            }
+        });
     }
 }
 aliasAndIndexLookup = Collections.unmodifiableSortedMap(aliasAndIndexLookup);
+// build all concrete indices arrays:
+// TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices.
+// When doing an operation across all indices, most of the time is spent on actually going to all shards and
+// do the required operations, the bottleneck isn't resolving expressions into concrete indices.
+String[] allIndicesArray = allIndices.toArray(new String[allIndices.size()]);
+String[] allOpenIndicesArray = allOpenIndices.toArray(new String[allOpenIndices.size()]);
+String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]);
 
 return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(),
-    customs.build(), allIndices, allOpenIndices, allClosedIndices, aliasAndIndexLookup);
+    customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, aliasAndIndexLookup);
 }
 
 public static String toXContent(MetaData metaData) throws IOException {
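
The observable effect of the rewritten build() is that an alias colliding with an index name is now reported once, with every duplicate listed. A hedged sketch of triggering it, assuming the usual metadata builder fixtures (the index names and builder calls are illustrative, not lines from this commit):

    import org.elasticsearch.Version;
    import org.elasticsearch.cluster.metadata.AliasMetaData;
    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.cluster.metadata.MetaData;
    import org.elasticsearch.common.settings.Settings;

    public class DuplicateAliasSketch {
        public static void main(String[] args) {
            Settings indexSettings = Settings.builder()
                    .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                    .build();
            // "other" declares an alias named like the existing index "index";
            // build() now detects the collision while assembling the lookup data.
            MetaData.builder()
                    .put(IndexMetaData.builder("index")
                            .settings(indexSettings)
                            .numberOfShards(1)
                            .numberOfReplicas(0))
                    .put(IndexMetaData.builder("other")
                            .settings(indexSettings)
                            .numberOfShards(1)
                            .numberOfReplicas(0)
                            .putAlias(AliasMetaData.builder("index")))
                    .build();
            // Expected: IllegalStateException("index and alias names need to be
            // unique, but the following duplicates were found [index (alias of
            // [other/...])]")
        }
    }
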
core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
@@ -161,21 +161,20 @@ public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request
 final Settings normalizedSettings = Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
 Settings.Builder settingsForClosedIndices = Settings.builder();
 Settings.Builder settingsForOpenIndices = Settings.builder();
-Settings.Builder skipppedSettings = Settings.builder();
+final Set<String> skippedSettings = new HashSet<>();
 
 indexScopedSettings.validate(normalizedSettings);
 // never allow to change the number of shards
-for (Map.Entry<String, String> entry : normalizedSettings.getAsMap().entrySet()) {
-    Setting setting = indexScopedSettings.get(entry.getKey());
+for (String key : normalizedSettings.getKeys()) {
+    Setting setting = indexScopedSettings.get(key);
     assert setting != null; // we already validated the normalized settings
-    settingsForClosedIndices.put(entry.getKey(), entry.getValue());
+    settingsForClosedIndices.copy(key, normalizedSettings);
     if (setting.isDynamic()) {
-        settingsForOpenIndices.put(entry.getKey(), entry.getValue());
+        settingsForOpenIndices.copy(key, normalizedSettings);
     } else {
-        skipppedSettings.put(entry.getKey(), entry.getValue());
+        skippedSettings.add(key);
     }
 }
-final Settings skippedSettigns = skipppedSettings.build();
 final Settings closedSettings = settingsForClosedIndices.build();
 final Settings openSettings = settingsForOpenIndices.build();
 final boolean preserveExisting = request.isPreserveExisting();
@@ -210,11 +209,9 @@ public ClusterState execute(ClusterState currentState) {
     }
 }
 
-if (!skippedSettigns.isEmpty() && !openIndices.isEmpty()) {
+if (!skippedSettings.isEmpty() && !openIndices.isEmpty()) {
     throw new IllegalArgumentException(String.format(Locale.ROOT,
-        "Can't update non dynamic settings [%s] for open indices %s",
-        skippedSettigns.getAsMap().keySet(),
-        openIndices
+        "Can't update non dynamic settings [%s] for open indices %s", skippedSettings, openIndices
     ));
 }
 
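
Seen from the API, the guard above is what rejects a non-dynamic setting on an open index. A sketch, not commit code: prepareUpdateSettings is the stock admin-client entry point, the index name is invented, and index.number_of_shards stands in for any non-dynamic setting:

    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.Settings;

    public class NonDynamicUpdateSketch {
        static void resizeOpenIndex(Client client) {
            // number_of_shards is not dynamic, so it lands in skippedSettings and
            // trips the guard above while the index is open, failing with roughly:
            // "Can't update non dynamic settings [[index.number_of_shards]]
            //  for open indices [[my-index/...]]"
            client.admin().indices().prepareUpdateSettings("my-index")
                    .setSettings(Settings.builder()
                            .put("index.number_of_shards", 5)
                            .build())
                    .get();
        }
    }
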
core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -84,7 +84,7 @@ public static boolean isIngestNode(Settings settings) {
 * <p>
 * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
 * version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
-* the node might not be able to communicate with the remove node. After initial handshakes node versions will be discovered
+* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
 * and updated.
 * </p>
 *
@@ -101,7 +101,7 @@ public DiscoveryNode(final String id, TransportAddress address, Version version)
 * <p>
 * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
 * version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
-* the node might not be able to communicate with the remove node. After initial handshakes node versions will be discovered
+* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
 * and updated.
 * </p>
 *
@@ -121,7 +121,7 @@ public DiscoveryNode(String id, TransportAddress address, Map<String, String> at
 * <p>
 * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
 * version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
-* the node might not be able to communicate with the remove node. After initial handshakes node versions will be discovered
+* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
 * and updated.
 * </p>
 *
@@ -143,7 +143,7 @@ public DiscoveryNode(String nodeName, String nodeId, TransportAddress address,
 * <p>
 * <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
 * version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
-* the node might not be able to communicate with the remove node. After initial handshakes node versions will be discovered
+* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
 * and updated.
 * </p>
 *
@@ -189,9 +189,8 @@ public DiscoveryNode(String nodeName, String nodeId, String ephemeralId, String

 /** Creates a DiscoveryNode representing the local node. */
 public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeId) {
-    Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(settings).getAsMap());
+    Map<String, String> attributes = Node.NODE_ATTRIBUTES.getAsMap(settings);
     Set<Role> roles = getRolesFromSettings(settings);
-
     return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT);
 }
 
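
Finally, a small usage sketch for the simplified createLocal above. The settings values and port are invented, and the node.attr.* prefix behind Node.NODE_ATTRIBUTES is an assumption worth double-checking:

    import java.net.InetAddress;

    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.transport.TransportAddress;

    public class CreateLocalSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .put("node.name", "demo-node")
                    .put("node.attr.rack", "r1") // ends up in the attributes map
                    .build();
            TransportAddress address =
                    new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
            DiscoveryNode node = DiscoveryNode.createLocal(settings, address, "demo-node-id");
            System.out.println(node.getAttributes()); // expected: {rack=r1}
        }
    }
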