From 1a45b27d8b692853036ea3acafd50e873d8d0e85 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Wed, 15 Aug 2018 15:26:00 -0600 Subject: [PATCH 01/46] Move CharArrays to core lib (#32851) This change cleans up some methods in the CharArrays class from x-pack, which includes the unification of char[] to utf8 and utf8 to char[] conversions that intentionally do not use strings. There was previously an implementation in x-pack and in the reloading of secure settings. The method from the reloading of secure settings was adopted as it handled more scenarios related to the backing byte and char buffers that were used to perform the conversions. The cleaned up class is moved into libs/core to allow it to be used by requests that will be migrated to the high level rest client. Relates #32332 --- .../org/elasticsearch/common/CharArrays.java | 150 ++++++++++++++++++ .../elasticsearch/common/CharArraysTests.java | 72 +++++++++ .../NodesReloadSecureSettingsRequest.java | 66 +------- .../action/token/CreateTokenRequest.java | 2 +- .../action/user/ChangePasswordRequest.java | 2 +- .../security/action/user/PutUserRequest.java | 2 +- .../core/security/authc/support/BCrypt.java | 9 +- .../security/authc/support/CharArrays.java | 101 ------------ .../core/security/authc/support/Hasher.java | 1 + .../authc/support/UsernamePasswordToken.java | 14 +- .../xpack/core/ssl/PemUtils.java | 2 +- .../core/watcher/crypto/CryptoService.java | 2 +- .../ldap/ActiveDirectorySessionFactory.java | 2 +- .../authc/ldap/LdapSessionFactory.java | 2 +- .../ldap/LdapUserSearchSessionFactory.java | 2 +- .../authc/ldap/PoolingSessionFactory.java | 2 +- .../esnative/ESNativeMigrateToolTests.java | 2 +- .../example/realm/CustomRealm.java | 2 +- 18 files changed, 257 insertions(+), 178 deletions(-) create mode 100644 libs/core/src/main/java/org/elasticsearch/common/CharArrays.java create mode 100644 libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java diff --git a/libs/core/src/main/java/org/elasticsearch/common/CharArrays.java b/libs/core/src/main/java/org/elasticsearch/common/CharArrays.java new file mode 100644 index 0000000000000..907874ca5735b --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/common/CharArrays.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Objects; + +/** + * Helper class similar to Arrays to handle conversions for Char arrays + */ +public final class CharArrays { + + private CharArrays() {} + + /** + * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding + * conversions to String. The provided byte[] is not modified by this method, so + * the caller needs to take care of clearing the value if it is sensitive. + */ + public static char[] utf8BytesToChars(byte[] utf8Bytes) { + final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); + final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); + final char[] chars; + if (charBuffer.hasArray()) { + // there is no guarantee that the char buffers backing array is the right size + // so we need to make a copy + chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); + Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data + } else { + final int length = charBuffer.limit() - charBuffer.position(); + chars = new char[length]; + charBuffer.get(chars); + // if the buffer is not read only we can reset and fill with 0's + if (charBuffer.isReadOnly() == false) { + charBuffer.clear(); // reset + for (int i = 0; i < charBuffer.limit(); i++) { + charBuffer.put((char) 0); + } + } + } + return chars; + } + + /** + * Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding + * conversions to String. The provided char[] is not modified by this method, so + * the caller needs to take care of clearing the value if it is sensitive. + */ + public static byte[] toUtf8Bytes(char[] chars) { + final CharBuffer charBuffer = CharBuffer.wrap(chars); + final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); + final byte[] bytes; + if (byteBuffer.hasArray()) { + // there is no guarantee that the byte buffers backing array is the right size + // so we need to make a copy + bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); + Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data + } else { + final int length = byteBuffer.limit() - byteBuffer.position(); + bytes = new byte[length]; + byteBuffer.get(bytes); + // if the buffer is not read only we can reset and fill with 0's + if (byteBuffer.isReadOnly() == false) { + byteBuffer.clear(); // reset + for (int i = 0; i < byteBuffer.limit(); i++) { + byteBuffer.put((byte) 0); + } + } + } + return bytes; + } + + /** + * Tests if a char[] contains a sequence of characters that match the prefix. This is like + * {@link String#startsWith(String)} but does not require conversion of the char[] to a string. + */ + public static boolean charsBeginsWith(String prefix, char[] chars) { + if (chars == null || prefix == null) { + return false; + } + + if (prefix.length() > chars.length) { + return false; + } + + for (int i = 0; i < prefix.length(); i++) { + if (chars[i] != prefix.charAt(i)) { + return false; + } + } + + return true; + } + + /** + * Constant time equality check of char arrays to avoid potential timing attacks. 
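+     * The comparison always visits every element of both arrays instead of returning at the
+     * first mismatch, so the time taken does not reveal the position of the first differing
+     * character; inputs of unequal length are still rejected without a full scan.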
+ */ + public static boolean constantTimeEquals(char[] a, char[] b) { + Objects.requireNonNull(a, "char arrays must not be null for constantTimeEquals"); + Objects.requireNonNull(b, "char arrays must not be null for constantTimeEquals"); + if (a.length != b.length) { + return false; + } + + int equals = 0; + for (int i = 0; i < a.length; i++) { + equals |= a[i] ^ b[i]; + } + + return equals == 0; + } + + /** + * Constant time equality check of strings to avoid potential timing attacks. + */ + public static boolean constantTimeEquals(String a, String b) { + Objects.requireNonNull(a, "strings must not be null for constantTimeEquals"); + Objects.requireNonNull(b, "strings must not be null for constantTimeEquals"); + if (a.length() != b.length()) { + return false; + } + + int equals = 0; + for (int i = 0; i < a.length(); i++) { + equals |= a.charAt(i) ^ b.charAt(i); + } + + return equals == 0; + } +} diff --git a/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java new file mode 100644 index 0000000000000..64b1ecd1f8a2d --- /dev/null +++ b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common; + +import org.elasticsearch.test.ESTestCase; + +import java.nio.charset.StandardCharsets; + +public class CharArraysTests extends ESTestCase { + + public void testCharsToBytes() { + final String originalValue = randomUnicodeOfCodepointLengthBetween(0, 32); + final byte[] expectedBytes = originalValue.getBytes(StandardCharsets.UTF_8); + final char[] valueChars = originalValue.toCharArray(); + + final byte[] convertedBytes = CharArrays.toUtf8Bytes(valueChars); + assertArrayEquals(expectedBytes, convertedBytes); + } + + public void testBytesToUtf8Chars() { + final String originalValue = randomUnicodeOfCodepointLengthBetween(0, 32); + final byte[] bytes = originalValue.getBytes(StandardCharsets.UTF_8); + final char[] expectedChars = originalValue.toCharArray(); + + final char[] convertedChars = CharArrays.utf8BytesToChars(bytes); + assertArrayEquals(expectedChars, convertedChars); + } + + public void testCharsBeginsWith() { + assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(4), null)); + assertFalse(CharArrays.charsBeginsWith(null, null)); + assertFalse(CharArrays.charsBeginsWith(null, randomAlphaOfLength(4).toCharArray())); + assertFalse(CharArrays.charsBeginsWith(randomAlphaOfLength(2), randomAlphaOfLengthBetween(3, 8).toCharArray())); + + final String prefix = randomAlphaOfLengthBetween(2, 4); + assertTrue(CharArrays.charsBeginsWith(prefix, prefix.toCharArray())); + final char[] prefixedValue = prefix.concat(randomAlphaOfLengthBetween(1, 12)).toCharArray(); + assertTrue(CharArrays.charsBeginsWith(prefix, prefixedValue)); + + final String modifiedPrefix = randomBoolean() ? prefix.substring(1) : prefix.substring(0, prefix.length() - 1); + final char[] nonMatchingValue = modifiedPrefix.concat(randomAlphaOfLengthBetween(0, 12)).toCharArray(); + assertFalse(CharArrays.charsBeginsWith(prefix, nonMatchingValue)); + assertTrue(CharArrays.charsBeginsWith(modifiedPrefix, nonMatchingValue)); + } + + public void testConstantTimeEquals() { + final String value = randomAlphaOfLengthBetween(0, 32); + assertTrue(CharArrays.constantTimeEquals(value, value)); + assertTrue(CharArrays.constantTimeEquals(value.toCharArray(), value.toCharArray())); + + final String other = randomAlphaOfLengthBetween(1, 32); + assertFalse(CharArrays.constantTimeEquals(value, other)); + assertFalse(CharArrays.constantTimeEquals(value.toCharArray(), other.toCharArray())); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 50df7b1bb26e0..5320470d366e1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -22,14 +22,12 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.StandardCharsets; import java.util.Arrays; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -83,7 +81,7 @@ public 
void readFrom(StreamInput in) throws IOException { super.readFrom(in); final byte[] passwordBytes = in.readByteArray(); try { - this.secureSettingsPassword = new SecureString(utf8BytesToChars(passwordBytes)); + this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(passwordBytes)); } finally { Arrays.fill(passwordBytes, (byte) 0); } @@ -92,69 +90,11 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - final byte[] passwordBytes = charsToUtf8Bytes(this.secureSettingsPassword.getChars()); + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); try { out.writeByteArray(passwordBytes); } finally { Arrays.fill(passwordBytes, (byte) 0); } } - - /** - * Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding - * conversions to String. The provided char[] is not modified by this method, so - * the caller needs to take care of clearing the value if it is sensitive. - */ - private static byte[] charsToUtf8Bytes(char[] chars) { - final CharBuffer charBuffer = CharBuffer.wrap(chars); - final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); - final byte[] bytes; - if (byteBuffer.hasArray()) { - // there is no guarantee that the byte buffers backing array is the right size - // so we need to make a copy - bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); - Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data - } else { - final int length = byteBuffer.limit() - byteBuffer.position(); - bytes = new byte[length]; - byteBuffer.get(bytes); - // if the buffer is not read only we can reset and fill with 0's - if (byteBuffer.isReadOnly() == false) { - byteBuffer.clear(); // reset - for (int i = 0; i < byteBuffer.limit(); i++) { - byteBuffer.put((byte) 0); - } - } - } - return bytes; - } - - /** - * Decodes the provided byte[] to a UTF-8 char[]. This is done while avoiding - * conversions to String. The provided byte[] is not modified by this method, so - * the caller needs to take care of clearing the value if it is sensitive. 
- */ - public static char[] utf8BytesToChars(byte[] utf8Bytes) { - final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); - final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); - final char[] chars; - if (charBuffer.hasArray()) { - // there is no guarantee that the char buffers backing array is the right size - // so we need to make a copy - chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); - Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data - } else { - final int length = charBuffer.limit() - charBuffer.position(); - chars = new char[length]; - charBuffer.get(chars); - // if the buffer is not read only we can reset and fill with 0's - if (charBuffer.isReadOnly() == false) { - charBuffer.clear(); // reset - for (int i = 0; i < charBuffer.limit(); i++) { - charBuffer.put((char) 0); - } - } - } - return chars; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index 5956e1a661345..fdb46711c0c59 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import java.io.IOException; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java index f84b133d984b6..b78b81c060080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java index f37072b9cf0fc..e704259396a34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java @@ -8,12 +8,12 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; import java.io.IOException; import java.util.Map; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java index ceb93dc4c853c..a93476bbdc8da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java @@ -14,6 +14,7 @@ // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.settings.SecureString; import java.security.SecureRandom; @@ -54,7 +55,7 @@ * String stronger_salt = BCrypt.gensalt(12)
* *

- * The amount of work increases exponentially (2**log_rounds), so + * The amount of work increases exponentially (2**log_rounds), so * each increment is twice as much work. The default log_rounds is * 10, and the valid range is 4 to 30. * @@ -689,7 +690,11 @@ public static String hashpw(SecureString password, String salt) { // the next lines are the SecureString replacement for the above commented-out section if (minor >= 'a') { - try (SecureString secureString = new SecureString(CharArrays.concat(password.getChars(), "\000".toCharArray()))) { + final char[] suffix = "\000".toCharArray(); + final char[] result = new char[password.length() + suffix.length]; + System.arraycopy(password.getChars(), 0, result, 0, password.length()); + System.arraycopy(suffix, 0, result, password.length(), suffix.length); + try (SecureString secureString = new SecureString(result)) { passwordb = CharArrays.toUtf8Bytes(secureString.getChars()); } } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java deleted file mode 100644 index 26df90c31a2de..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.security.authc.support; - -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; - -/** - * Helper class similar to Arrays to handle conversions for Char arrays - */ -public class CharArrays { - - public static char[] utf8BytesToChars(byte[] utf8Bytes) { - ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); - CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); - char[] chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); - byteBuffer.clear(); - charBuffer.clear(); - return chars; - } - - /** - * Like String.indexOf for for an array of chars - */ - static int indexOf(char[] array, char ch) { - for (int i = 0; (i < array.length); i++) { - if (array[i] == ch) { - return i; - } - } - return -1; - } - - /** - * Converts the provided char[] to a UTF-8 byte[]. The provided char[] is not modified by this - * method, so the caller needs to take care of clearing the value if it is sensitive. 
- */ - public static byte[] toUtf8Bytes(char[] chars) { - CharBuffer charBuffer = CharBuffer.wrap(chars); - ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); - byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); - Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data - return bytes; - } - - public static boolean charsBeginsWith(String prefix, char[] chars) { - if (chars == null || prefix == null) { - return false; - } - - if (prefix.length() > chars.length) { - return false; - } - - for (int i = 0; i < prefix.length(); i++) { - if (chars[i] != prefix.charAt(i)) { - return false; - } - } - - return true; - } - - public static boolean constantTimeEquals(char[] a, char[] b) { - if (a.length != b.length) { - return false; - } - - int equals = 0; - for (int i = 0; i < a.length; i++) { - equals |= a[i] ^ b[i]; - } - - return equals == 0; - } - - public static boolean constantTimeEquals(String a, String b) { - if (a.length() != b.length()) { - return false; - } - - int equals = 0; - for (int i = 0; i < a.length(); i++) { - equals |= a.charAt(i) ^ b.charAt(i); - } - - return equals == 0; - } - - public static char[] concat(char[] a, char[] b) { - final char[] result = new char[a.length + b.length]; - System.arraycopy(a, 0, result, 0, a.length); - System.arraycopy(b, 0, result, a.length, b.length); - return result; - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index d12547bd90645..492622b2c519c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authc.support; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.SecureString; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java index d8e58c29d237b..1349303600884 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authc.support; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -107,7 +108,7 @@ private static UsernamePasswordToken extractToken(String headerValue) { throw authenticationError("invalid basic authentication header encoding", e); } - int i = CharArrays.indexOf(userpasswd, ':'); + int i = indexOfColon(userpasswd); if (i < 0) { throw authenticationError("invalid basic authentication header value"); } @@ -121,4 +122,15 @@ public static void putTokenHeader(ThreadContext context, UsernamePasswordToken t context.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue(token.username, token.password)); } + /** + * Like String.indexOf for for an array of chars + */ + 
private static int indexOfColon(char[] array) { + for (int i = 0; (i < array.length); i++) { + if (array[i] == ':') { + return i; + } + } + return -1; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java index d959c017e0a35..a3814a76a3e6e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ssl; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import java.io.BufferedReader; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java index b1f3a32769ec9..a25e79ffdf66f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.watcher.WatcherField; import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java index d175e1b229312..8107d7488188b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java @@ -32,7 +32,7 @@ import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java index 36d14aa67c0de..70b2f0015cf7a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; -import 
org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java index 2ec87888d8c13..a3541ec2759b3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java @@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java index 367bd525036e2..986fa1900e7c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java @@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java index 212ee7ea499ec..6d75cf093714a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.BeforeClass; diff --git 
a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java index af3fb160e133f..c6502c05d252f 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.common.CharArrays; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.protocol.xpack.security.User; From 93ada98eb54d99b9d98ed51778bd6422d18fd392 Mon Sep 17 00:00:00 2001 From: debadair Date: Wed, 15 Aug 2018 15:09:41 -0700 Subject: [PATCH 02/46] [DOCS] Fixing cross doc link to Stack Overview security topic. --- x-pack/docs/en/security/configuring-es.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 5e8f1adbc7aa8..53f36afc73481 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -9,7 +9,7 @@ password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see -{xpack-ref}/xpack-security.html[Securing the Elastic Stack]. +{xpack-ref}/elasticsearch-security.html[Securing the Elastic Stack]. To use {security} in {es}: From 70d80a3d093e327a05eeb7b8c49fee7c94952a8f Mon Sep 17 00:00:00 2001 From: markharwood Date: Thu, 16 Aug 2018 10:21:37 +0100 Subject: [PATCH 03/46] Docs enhancement: added reference to cluster-level setting `search.default_allow_partial_results` (#32810) Closes #32809 --- docs/reference/search/request-body.asciidoc | 3 ++- docs/reference/search/uri-request.asciidoc | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 2a51d705d83ec..e7c9b593af372 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -90,7 +90,8 @@ And here is a sample response: Set to `false` to return an overall failure if the request would produce partial results. Defaults to true, which will allow partial results in the case of timeouts - or partial failures. + or partial failures. This default can be controlled using the cluster-level setting + `search.default_allow_partial_results`. `terminate_after`:: diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index a90f32bb3cd36..279bc0c0384c1 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -125,5 +125,6 @@ more details on the different types of search that can be performed. |`allow_partial_search_results` |Set to `false` to return an overall failure if the request would produce partial results. Defaults to true, which will allow partial results in the case of timeouts -or partial failures.. +or partial failures. 
This default can be controlled using the cluster-level setting +`search.default_allow_partial_results`. |======================================================================= From e6bfba1d799cbc41fce5ea88e2252f78fd9cac65 Mon Sep 17 00:00:00 2001 From: datosh Date: Thu, 16 Aug 2018 11:34:41 +0200 Subject: [PATCH 04/46] [DOCS] Clarify sentence in network-host.asciidoc (#32429) --- docs/reference/setup/important-settings/network-host.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/important-settings/network-host.asciidoc b/docs/reference/setup/important-settings/network-host.asciidoc index 7e29e73123d8d..1788bfebc66b5 100644 --- a/docs/reference/setup/important-settings/network-host.asciidoc +++ b/docs/reference/setup/important-settings/network-host.asciidoc @@ -9,7 +9,7 @@ location on a single node. This can be useful for testing Elasticsearch's ability to form clusters, but it is not a configuration recommended for production. -In order to communicate and to form a cluster with nodes on other servers, your +In order to form a cluster with nodes on other servers, your node will need to bind to a non-loopback address. While there are many <>, usually all you need to configure is `network.host`: From 996ed73d735b1a2943cb5c7af6ba315a225b64c0 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 16 Aug 2018 11:44:20 +0200 Subject: [PATCH 05/46] Test: Fix unpredictive merges in DocumentSubsetReaderTests The merge policy that was used could lead to unpredictive merges due to the randomization of `setDeletesPctAllowed`. Closes #32457 --- .../authz/accesscontrol/DocumentSubsetReaderTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java index 38857e2170de4..dca2f37f3f224 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java @@ -80,9 +80,8 @@ public void cleanDirectory() throws Exception { bitsetFilterCache.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32457") public void testSearch() throws Exception { - IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig()); + IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random()))); Document document = new Document(); document.add(new StringField("field", "value1", Field.Store.NO)); From 7f6802cb51eda172c64fb223bf5728719f586b11 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 16 Aug 2018 10:48:56 +0100 Subject: [PATCH 06/46] [ML] Choose seconds to fix intermittent DatafeeedConfigTest failure --- .../xpack/core/ml/datafeed/DatafeedConfigTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index ffc13655d229c..3030449abd1b6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -100,7 +100,7 @@ public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long b if (aggHistogramInterval == null) { builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); } else { - builder.setFrequency(TimeValue.timeValueMillis(randomIntBetween(1, 5) * aggHistogramInterval)); + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 5) * aggHistogramInterval)); } } if (randomBoolean()) { From 039babddf532c918ae3f0ac5360e85e7ba5b3052 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 16 Aug 2018 11:53:01 +0200 Subject: [PATCH 07/46] CharArraysTests: Fix test bug. --- .../test/java/org/elasticsearch/common/CharArraysTests.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java index 64b1ecd1f8a2d..9283283ab0861 100644 --- a/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java +++ b/libs/core/src/test/java/org/elasticsearch/common/CharArraysTests.java @@ -55,7 +55,10 @@ public void testCharsBeginsWith() { assertTrue(CharArrays.charsBeginsWith(prefix, prefixedValue)); final String modifiedPrefix = randomBoolean() ? prefix.substring(1) : prefix.substring(0, prefix.length() - 1); - final char[] nonMatchingValue = modifiedPrefix.concat(randomAlphaOfLengthBetween(0, 12)).toCharArray(); + char[] nonMatchingValue; + do { + nonMatchingValue = modifiedPrefix.concat(randomAlphaOfLengthBetween(0, 12)).toCharArray(); + } while (new String(nonMatchingValue).startsWith(prefix)); assertFalse(CharArrays.charsBeginsWith(prefix, nonMatchingValue)); assertTrue(CharArrays.charsBeginsWith(modifiedPrefix, nonMatchingValue)); } From d80457ee2a0a5ce8973b48a06b5d60ee2c5e64c1 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 16 Aug 2018 11:07:20 +0100 Subject: [PATCH 08/46] Mutes test in DuelScrollIT Due to https://github.com/elastic/elasticsearch/issues/32682 --- .../test/java/org/elasticsearch/search/scroll/DuelScrollIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java index 31fcfa7155cc0..1ddd11e5d0f7d 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -256,6 +257,7 @@ private void testDuelIndexOrder(SearchType searchType, boolean trackScores, int } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32682") public void testDuelIndexOrderQueryThenFetch() throws Exception { final SearchType searchType = RandomPicks.randomFrom(random(), Arrays.asList(SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH)); From e35be019014aafba9c34484e5711a04ce482ffd1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 16 Aug 2018 12:27:21 +0200 Subject: [PATCH 09/46] AwaitFix AckIT. 
Relates #32767 --- server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 2cd8a2c27c714..df97854cc35b0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.ack; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -50,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue; @ClusterScope(minNumDataNodes = 2) +@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32767") public class AckIT extends ESIntegTestCase { @Override From f8c7414ee861b441f81716e13c3190f4d2d97c54 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 16 Aug 2018 07:24:05 -0400 Subject: [PATCH 10/46] Remove passphrase support from reload settings API (#32889) We do not support passphrases on the secure settings storage (the keystore). Yet, we added support for this in the API layer. This commit removes this support so that we are not limited in our future options, or have to make a breaking change. --- .../NodesReloadSecureSettingsRequest.java | 68 +------------ ...desReloadSecureSettingsRequestBuilder.java | 49 ---------- ...nsportNodesReloadSecureSettingsAction.java | 6 +- .../RestReloadSecureSettingsAction.java | 11 +-- .../action/admin/ReloadSecureSettingsIT.java | 98 ++----------------- 5 files changed, 16 insertions(+), 216 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 5320470d366e1..fb3e6ac71adf3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -19,82 +19,22 @@ package org.elasticsearch.action.admin.cluster.node.reload; - -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.nodes.BaseNodesRequest; -import org.elasticsearch.common.CharArrays; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.SecureString; - -import java.io.IOException; -import java.util.Arrays; - -import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * Request for a reload secure settings action + * Request for a reload secure settings action. */ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { - /** - * The password which is broadcasted to all nodes, but is never stored on - * persistent storage. The password is used to reread and decrypt the contents - * of the node's keystore (backing the implementation of - * {@code SecureSettings}). - */ - private SecureString secureSettingsPassword; - public NodesReloadSecureSettingsRequest() { } /** - * Reload secure settings only on certain nodes, based on the nodes ids - * specified. 
If none are passed, secure settings will be reloaded on all the - * nodes. + * Reload secure settings only on certain nodes, based on the nodes IDs specified. If none are passed, secure settings will be reloaded + * on all the nodes. */ - public NodesReloadSecureSettingsRequest(String... nodesIds) { + public NodesReloadSecureSettingsRequest(final String... nodesIds) { super(nodesIds); } - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (secureSettingsPassword == null) { - validationException = addValidationError("secure settings password cannot be null (use empty string instead)", - validationException); - } - return validationException; - } - - public SecureString secureSettingsPassword() { - return secureSettingsPassword; - } - - public NodesReloadSecureSettingsRequest secureStorePassword(SecureString secureStorePassword) { - this.secureSettingsPassword = secureStorePassword; - return this; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - final byte[] passwordBytes = in.readByteArray(); - try { - this.secureSettingsPassword = new SecureString(CharArrays.utf8BytesToChars(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeByteArray(passwordBytes); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index b5f2f73e56f51..c8250455e6ba3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -19,19 +19,8 @@ package org.elasticsearch.action.admin.cluster.node.reload; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Objects; /** * Builder for the reload secure settings nodes request @@ -39,46 +28,8 @@ public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder { - public static final String SECURE_SETTINGS_PASSWORD_FIELD_NAME = "secure_settings_password"; - public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) { super(client, action, new NodesReloadSecureSettingsRequest()); } - public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) { - request.secureStorePassword(secureStorePassword); - return this; - } - - public NodesReloadSecureSettingsRequestBuilder source(BytesReference 
source, XContentType xContentType) throws IOException { - Objects.requireNonNull(xContentType); - // EMPTY is ok here because we never call namedObject - try (InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, stream)) { - XContentParser.Token token; - token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("expected an object, but found token [{}]", token); - } - token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME || false == SECURE_SETTINGS_PASSWORD_FIELD_NAME.equals(parser.currentName())) { - throw new ElasticsearchParseException("expected a field named [{}], but found [{}]", SECURE_SETTINGS_PASSWORD_FIELD_NAME, - token); - } - token = parser.nextToken(); - if (token != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", - SECURE_SETTINGS_PASSWORD_FIELD_NAME, token); - } - final String password = parser.text(); - setSecureStorePassword(new SecureString(password.toCharArray())); - token = parser.nextToken(); - if (token != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchParseException("expected end of object, but found token [{}]", token); - } - } - return this; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 0f44170fa603b..b8a36bac68d61 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; @@ -82,16 +81,13 @@ protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { @Override protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { - final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; - final SecureString secureSettingsPassword = request.secureSettingsPassword(); try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file if (keystore == null) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), new IllegalStateException("Keystore is missing")); } - // decrypt the keystore using the password from the request - keystore.decrypt(secureSettingsPassword.getChars()); + keystore.decrypt(new char[0]); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder() .put(environment.settings(), false) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 0697871ea5d1c..2251615d678fb 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -59,7 +59,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client .cluster() .prepareReloadSecureSettings() .setTimeout(request.param("timeout")) - .source(request.requiredContent(), request.getXContentType()) .setNodesIds(nodesIds); final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request(); return channel -> nodesRequestBuilder @@ -68,12 +67,12 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) throws Exception { builder.startObject(); - RestActions.buildNodesHeader(builder, channel.request(), response); - builder.field("cluster_name", response.getClusterName().value()); - response.toXContent(builder, channel.request()); + { + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + } builder.endObject(); - // clear password for the original request - nodesRequest.secureSettingsPassword().close(); return new BytesRestResponse(RestStatus.OK, builder); } }); diff --git a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 7952758240544..3f9e258ffec1c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -20,11 +20,9 @@ package org.elasticsearch.action.admin; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -44,11 +42,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.containsString; public class ReloadSecureSettingsIT extends ESIntegTestCase { @@ -62,7 +60,7 @@ public void testMissingKeystoreFile() throws Exception { Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -96,44 +94,6 @@ public void onFailure(Exception e) { 
assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); } - public void testNullKeystorePassword() throws Exception { - final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); - final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); - final AtomicReference reloadSettingsError = new AtomicReference<>(); - final int initialReloadCount = mockReloadablePlugin.getReloadCount(); - final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().execute( - new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - reloadSettingsError.set(new AssertionError("Null keystore password should fail")); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - try { - assertThat(e, instanceOf(ActionRequestValidationException.class)); - assertThat(e.getMessage(), containsString("secure settings password cannot be null")); - } catch (final AssertionError ae) { - reloadSettingsError.set(ae); - } finally { - latch.countDown(); - } - } - }); - latch.await(); - if (reloadSettingsError.get() != null) { - throw reloadSettingsError.get(); - } - // in the null password case no reload should be triggered - assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); - } - public void testInvalidKeystoreFile() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) @@ -149,7 +109,7 @@ public void testInvalidKeystoreFile() throws Exception { Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -181,52 +141,6 @@ public void onFailure(Exception e) { assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); } - public void testWrongKeystorePassword() throws Exception { - final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); - final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class) - .stream().findFirst().get(); - final Environment environment = internalCluster().getInstance(Environment.class); - final AtomicReference reloadSettingsError = new AtomicReference<>(); - final int initialReloadCount = mockReloadablePlugin.getReloadCount(); - // "some" keystore should be present in this case - writeEmptyKeystore(environment, new char[0]); - final CountDownLatch latch = new CountDownLatch(1); - client().admin() - .cluster() - .prepareReloadSecureSettings() - .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' })) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - 
for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException(), instanceOf(SecurityException.class)); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); - latch.countDown(); - } - }); - latch.await(); - if (reloadSettingsError.get() != null) { - throw reloadSettingsError.get(); - } - // in the wrong password case no reload should be triggered - assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount)); - } - public void testMisbehavingPlugin() throws Exception { final Environment environment = internalCluster().getInstance(Environment.class); final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); @@ -247,7 +161,7 @@ public void testMisbehavingPlugin() throws Exception { .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build()) .toString(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { @@ -314,7 +228,7 @@ protected Collection> nodePlugins() { private void successfulReloadCall() throws InterruptedException { final AtomicReference reloadSettingsError = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); - client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute( + client().admin().cluster().prepareReloadSecureSettings().execute( new ActionListener() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { From b87f3062b77cab7888e9037b4996f2c26db12816 Mon Sep 17 00:00:00 2001 From: Hazem Khaled Date: Thu, 16 Aug 2018 14:54:04 +0300 Subject: [PATCH 11/46] [DOCS] Update WordPress plugins links (#32194) --- docs/plugins/integrations.asciidoc | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 90f2c685fdaeb..8bffe5193ed7b 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -17,14 +17,11 @@ Integrations are not plugins, but are external tools or modules that make it eas * https://drupal.org/project/elasticsearch_connector[Drupal]: Drupal Elasticsearch integration. -* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]: - Elasticsearch (and Apache Solr) WordPress Plugin - -* http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]: +* https://wordpress.org/plugins/elasticpress/[ElasticPress]: Elasticsearch WordPress Plugin -* https://github.com/wallmanderco/elasticsearch-indexer[Elasticsearch Indexer]: - Elasticsearch WordPress Plugin +* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]: + Elasticsearch (and Apache Solr) WordPress Plugin * https://doc.tiki.org/Elasticsearch[Tiki Wiki CMS Groupware]: Tiki has native support for Elasticsearch. 
This provides faster & better From aedc2c1c498688970afcebe08726480e611f117b Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Thu, 16 Aug 2018 07:18:43 -0500 Subject: [PATCH 12/46] HLRC: adding machine learning delete job (#32820) * HLRC: adding machine learning delete job * Fixing whitespace * Moving docs and tests around * Unifying ml asciidoc file naming convention --- .../client/MachineLearningClient.java | 40 ++++++++++ .../client/RequestConverters.java | 16 ++++ .../client/MachineLearningIT.java | 15 ++++ .../client/RequestConvertersTests.java | 15 ++++ .../MlClientDocumentationIT.java | 56 +++++++++++++- .../high-level/ml/delete-job.asciidoc | 49 ++++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../protocol/xpack/ml/DeleteJobRequest.java | 75 +++++++++++++++++++ .../protocol/xpack/ml/DeleteJobResponse.java | 60 +++++++++++++++ .../xpack/ml/DeleteJobRequestTests.java | 45 +++++++++++ .../xpack/ml/DeleteJobResponseTests.java | 42 +++++++++++ 11 files changed, 412 insertions(+), 3 deletions(-) create mode 100644 docs/java-rest/high-level/ml/delete-job.asciidoc create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java create mode 100644 x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java create mode 100644 x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index a3e5ba72b773f..2e7914e64abdb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,6 +19,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -80,6 +82,44 @@ public void putJobAsync(PutJobRequest request, RequestOptions options, ActionLis Collections.emptySet()); } + /** + * Deletes the given Machine Learning Job + *
<p>
+ * For additional info + * see ML Delete Job documentation + *
</p>
+ * @param request the request to delete the job + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return action acknowledgement + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + RequestConverters::deleteMachineLearningJob, + options, + DeleteJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Deletes the given Machine Learning Job asynchronously and notifies the listener on completion + *
<p>
+ * For additional info + * see ML Delete Job documentation + *
</p>
+ * @param request the request to delete the job + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + RequestConverters::deleteMachineLearningJob, + options, + DeleteJobResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Opens a Machine Learning Job. * When you open a new job, it starts with an empty model. diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 973c0ce126d37..c40b4893e0146 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -112,6 +112,7 @@ import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; @@ -1211,6 +1212,21 @@ static Request putMachineLearningJob(PutJobRequest putJobRequest) throws IOExcep return request; } + static Request deleteMachineLearningJob(DeleteJobRequest deleteJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(deleteJobRequest.getJobId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + Params params = new Params(request); + params.putParam("force", Boolean.toString(deleteJobRequest.isForce())); + + return request; + } + static Request machineLearningOpenJob(OpenJobRequest openJobRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 94e73a14c188c..0037460150f1a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -20,6 +20,8 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -48,6 +50,19 @@ public void testPutJob() throws Exception { assertThat(createdJob.getJobType(), is(Job.ANOMALY_DETECTOR_JOB_TYPE)); } + public void testDeleteJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), 
RequestOptions.DEFAULT); + + DeleteJobResponse response = execute(new DeleteJobRequest(jobId), + machineLearningClient::deleteJob, + machineLearningClient::deleteJobAsync); + + assertTrue(response.isAcknowledged()); + } + public void testOpenJob() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 1c9707e0e27fa..786cb94f8926d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -127,6 +127,7 @@ import org.elasticsearch.index.rankeval.RestRankEvalAction; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; @@ -2611,6 +2612,20 @@ public void testXPackDeleteWatch() { assertThat(request.getEntity(), nullValue()); } + public void testDeleteMachineLearningJob() { + String jobId = randomAlphaOfLength(10); + DeleteJobRequest deleteJobRequest = new DeleteJobRequest(jobId); + + Request request = RequestConverters.deleteMachineLearningJob(deleteJobRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId, request.getEndpoint()); + assertEquals(Boolean.toString(false), request.getParameters().get("force")); + + deleteJobRequest.setForce(true); + request = RequestConverters.deleteMachineLearningJob(deleteJobRequest); + assertEquals(Boolean.toString(true), request.getParameters().get("force")); + } + public void testPostMachineLearningOpenJob() throws Exception { String jobId = "some-job-id"; OpenJobRequest openJobRequest = new OpenJobRequest(jobId); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 50cd244c0fa07..a77d8b43e5737 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -25,6 +25,8 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.ml.OpenJobResponse; import org.elasticsearch.protocol.xpack.ml.PutJobRequest; @@ -122,6 +124,56 @@ public void onFailure(Exception e) { } } + public void testDeleteJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + String jobId = "my-first-machine-learning-job"; + + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("my-second-machine-learning-job"); + 
client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-delete-ml-job-request + DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-first-machine-learning-job"); + deleteJobRequest.setForce(false); //<1> + DeleteJobResponse deleteJobResponse = client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT); + //end::x-pack-delete-ml-job-request + + //tag::x-pack-delete-ml-job-response + boolean isAcknowledged = deleteJobResponse.isAcknowledged(); //<1> + //end::x-pack-delete-ml-job-response + } + { + //tag::x-pack-delete-ml-job-request-listener + ActionListener<DeleteJobResponse> listener = new ActionListener<DeleteJobResponse>() { + @Override + public void onResponse(DeleteJobResponse deleteJobResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-delete-ml-job-request-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + //tag::x-pack-delete-ml-job-request-async + DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-second-machine-learning-job"); + client.machineLearning().deleteJobAsync(deleteJobRequest, RequestOptions.DEFAULT, listener); // <1> + //end::x-pack-delete-ml-job-request-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testOpenJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -143,7 +195,6 @@ public void testOpenJob() throws Exception { //end::x-pack-ml-open-job-execute } - { //tag::x-pack-ml-open-job-listener ActionListener<OpenJobResponse> listener = new ActionListener<OpenJobResponse>() { @@ -154,7 +205,7 @@ public void onResponse(OpenJobResponse openJobResponse) { @Override public void onFailure(Exception e) { - //<2> + // <2> } }; //end::x-pack-ml-open-job-listener @@ -169,6 +220,5 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - } } diff --git a/docs/java-rest/high-level/ml/delete-job.asciidoc b/docs/java-rest/high-level/ml/delete-job.asciidoc new file mode 100644 index 0000000000000..44a6a47940955 --- /dev/null +++ b/docs/java-rest/high-level/ml/delete-job.asciidoc @@ -0,0 +1,49 @@ +[[java-rest-high-x-pack-ml-delete-job]] +=== Delete Job API + +[[java-rest-high-x-pack-machine-learning-delete-job-request]] +==== Delete Job Request + +A `DeleteJobRequest` object requires a non-null `jobId` and can optionally set `force`. +It can be executed as follows: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request] +--------------------------------------------------- +<1> Use to forcefully delete an opened job; +this method is quicker than closing and deleting the job. +Defaults to `false` + +[[java-rest-high-x-pack-machine-learning-delete-job-response]] +==== Delete Job Response + +The returned `DeleteJobResponse` object indicates the acknowledgement of the request: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-response] +--------------------------------------------------- +<1> `isAcknowledged` indicates whether the deletion request was acknowledged + +[[java-rest-high-x-pack-machine-learning-delete-job-async]] +==== Delete Job Asynchronously + +This request can also be made asynchronously.
+["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request-async] +--------------------------------------------------- +<1> The `DeleteJobRequest` to execute and the `ActionListener` to alert on completion or error. + +The deletion request returns immediately. Once the request is completed, the `ActionListener` is +called back using either the `onResponse` or `onFailure` method. The latter indicates some failure occurred when +making the request. + +A typical listener for a `DeleteJobRequest` could be defined as follows: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-request-listener] +--------------------------------------------------- +<1> The action to be taken when it is completed +<2> What to do when a failure occurs diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index a2db3436317c3..6bcb736243a7c 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -205,9 +205,11 @@ include::licensing/delete-license.asciidoc[] The Java High Level REST Client supports the following Machine Learning APIs: * <<java-rest-high-x-pack-ml-put-job>> +* <<java-rest-high-x-pack-ml-delete-job>> * <<java-rest-high-x-pack-ml-open-job>> include::ml/put-job.asciidoc[] +include::ml/delete-job.asciidoc[] include::ml/open-job.asciidoc[] == Migration APIs diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java new file mode 100644 index 0000000000000..1b7450de0929c --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequest.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +import java.util.Objects; + +public class DeleteJobRequest extends ActionRequest { + + private String jobId; + private boolean force; + + public DeleteJobRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = Objects.requireNonNull(jobId, "[job_id] must not be null"); + } + + public boolean isForce() { + return force; + } + + public void setForce(boolean force) { + this.force = force; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, force); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || obj.getClass() != getClass()) { + return false; + } + + DeleteJobRequest other = (DeleteJobRequest) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(force, other.force); + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java new file mode 100644 index 0000000000000..0b4faa38f545f --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponse.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteJobResponse extends AcknowledgedResponse { + + public DeleteJobResponse(boolean acknowledged) { + super(acknowledged); + } + + public DeleteJobResponse() { + } + + public static DeleteJobResponse fromXContent(XContentParser parser) throws IOException { + AcknowledgedResponse response = AcknowledgedResponse.fromXContent(parser); + return new DeleteJobResponse(response.isAcknowledged()); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DeleteJobResponse that = (DeleteJobResponse) other; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(isAcknowledged()); + } + +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java new file mode 100644 index 0000000000000..fb8a38fa0c68e --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.protocol.xpack.ml.job.config.JobTests; +import org.elasticsearch.test.ESTestCase; + +public class DeleteJobRequestTests extends ESTestCase { + + private DeleteJobRequest createTestInstance() { + return new DeleteJobRequest(JobTests.randomValidJobId()); + } + + public void test_WithNullJobId() { + NullPointerException ex = expectThrows(NullPointerException.class, () -> new DeleteJobRequest(null)); + assertEquals("[job_id] must not be null", ex.getMessage()); + + ex = expectThrows(NullPointerException.class, () -> createTestInstance().setJobId(null)); + assertEquals("[job_id] must not be null", ex.getMessage()); + } + + public void test_WithForce() { + DeleteJobRequest deleteJobRequest = createTestInstance(); + assertFalse(deleteJobRequest.isForce()); + + deleteJobRequest.setForce(true); + assertTrue(deleteJobRequest.isForce()); + } +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java new file mode 100644 index 0000000000000..a73179a08983d --- /dev/null +++ b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/DeleteJobResponseTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DeleteJobResponseTests extends AbstractXContentTestCase { + + @Override + protected DeleteJobResponse createTestInstance() { + return new DeleteJobResponse(); + } + + @Override + protected DeleteJobResponse doParseInstance(XContentParser parser) throws IOException { + return DeleteJobResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} From 3dd1677cdc8fa6b4c36a79f7b206430661469ce8 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 16 Aug 2018 15:33:17 +0200 Subject: [PATCH 13/46] [Test] Fix DuelScrollIT#testDuelIndexOrderQueryThenFetch This commit disables the automatic `refresh_interval` in order to ensure that index readers cannot differ between the normal and scroll search. This issue is related to the 7.5 Lucene upgrade which contains a change that makes single segment merge more likely to occur (max deletes percentage). 
Closes #32682 --- .../java/org/elasticsearch/search/scroll/DuelScrollIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java index 1ddd11e5d0f7d..4005f1218a92f 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -199,6 +199,8 @@ private int createIndex(boolean singleShard) throws Exception { } // no replicas, as they might be ordered differently settings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0); + // we need to control refreshes as they might take different merges into account + settings.put("index.refresh_interval", -1); assertAcked(prepareCreate("test").setSettings(settings.build()).get()); final int numDocs = randomIntBetween(10, 200); @@ -257,7 +259,6 @@ private void testDuelIndexOrder(SearchType searchType, boolean trackScores, int } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32682") public void testDuelIndexOrderQueryThenFetch() throws Exception { final SearchType searchType = RandomPicks.randomFrom(random(), Arrays.asList(SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH)); From eaaf37a1f99309c8a7c176c9b2cc7f56ec5c69d1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 16 Aug 2018 15:25:51 +0200 Subject: [PATCH 14/46] AwaitFix FullClusterRestartIT#testRollupIDSchemeAfterRestart. --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 24303b8342b7e..6ead87aba6103 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -325,6 +325,7 @@ public void testRollupAfterRestart() throws Exception { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32773") public void testRollupIDSchemeAfterRestart() throws Exception { assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); assumeTrue("Rollup ID scheme changed in 6.4", oldClusterVersion.before(Version.V_6_4_0)); From d9fd74bcdcabd5020e40ef7e5dd67c6de09ae0f0 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 16 Aug 2018 08:03:21 -0700 Subject: [PATCH 15/46] Painless: Special Case def (#32871) This removes def from the classes map in PainlessLookup and instead always special cases it. This prevents potential calls against the def type that shouldn't be made and forces all cases of def throughout Painless code to be special cased. 
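As a rough sketch of the pattern this change applies (simplified names, not the actual PainlessLookup API; the def stand-in below is illustrative), every lookup now short-circuits on def before consulting the class map instead of storing def in the map itself:

import java.util.Map;

final class LookupSketch {

    // stand-in for Painless's marker class for dynamically typed values (illustrative only)
    static final class def {}

    static final String DEF_CLASS_NAME = "def";

    // def is never stored in the map, so every name lookup must special case it first
    static Class<?> canonicalTypeNameToType(String canonicalTypeName, Map<String, Class<?>> classes) {
        return DEF_CLASS_NAME.equals(canonicalTypeName) ? def.class : classes.get(canonicalTypeName);
    }

    // the same guard applies when validating an already resolved type
    static boolean isValidType(Class<?> type, Map<Class<?>, Object> classesToBuilders) {
        return type == def.class || classesToBuilders.containsKey(type);
    }
}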
--- .../java/org/elasticsearch/painless/ScriptClassInfo.java | 3 ++- .../org/elasticsearch/painless/lookup/PainlessLookup.java | 3 ++- .../elasticsearch/painless/lookup/PainlessLookupBuilder.java | 5 +---- .../elasticsearch/painless/lookup/PainlessLookupUtility.java | 4 ++-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java index 345db46f8875f..7de8353194dda 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java @@ -21,6 +21,7 @@ import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupUtility; +import org.elasticsearch.painless.lookup.def; import java.lang.invoke.MethodType; import java.lang.reflect.Field; @@ -190,7 +191,7 @@ private static Class<?> definitionTypeForClass(PainlessLookup painlessLookup, Cl componentType = componentType.getComponentType(); } - if (painlessLookup.lookupPainlessClass(componentType) == null) { + if (componentType != def.class && painlessLookup.lookupPainlessClass(componentType) == null) { throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType)); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index 16b8ac14f14f2..55855a3cb1efb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -26,6 +26,7 @@ import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.painless.lookup.PainlessLookupUtility.DEF_CLASS_NAME; import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessConstructorKey; import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessFieldKey; import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessMethodKey; @@ -47,7 +48,7 @@ public final class PainlessLookup { public boolean isValidCanonicalClassName(String canonicalClassName) { Objects.requireNonNull(canonicalClassName); - return canonicalClassNamesToClasses.containsKey(canonicalClassName); + return DEF_CLASS_NAME.equals(canonicalClassName) || canonicalClassNamesToClasses.containsKey(canonicalClassName); } public Class<?> canonicalTypeNameToType(String canonicalTypeName) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index e644453a4c1ba..c8353b54c9f44 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -211,9 +211,6 @@ public static PainlessLookup buildFromWhitelists(List<Whitelist> whitelists) { public PainlessLookupBuilder() { canonicalClassNamesToClasses = new HashMap<>(); classesToPainlessClassBuilders = new HashMap<>(); - - canonicalClassNamesToClasses.put(DEF_CLASS_NAME, def.class); - classesToPainlessClassBuilders.put(def.class, new PainlessClassBuilder()); } private Class<?> canonicalTypeNameToType(String
canonicalTypeName) { @@ -225,7 +222,7 @@ private boolean isValidType(Class<?> type) { type = type.getComponentType(); } - return classesToPainlessClassBuilders.containsKey(type); + return type == def.class || classesToPainlessClassBuilders.containsKey(type); } public void addPainlessClass(ClassLoader classLoader, String javaClassName, boolean importClassName) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java index f2eb434516961..71cacab9eba9d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java @@ -82,7 +82,7 @@ public static Class<?> canonicalTypeNameToType(String canonicalTypeName, Map<String, Class<?>> canonicalClassNamesToClasses) { - Class<?> type = canonicalClassNamesToClasses.get(canonicalTypeName); + Class<?> type = DEF_CLASS_NAME.equals(canonicalTypeName) ? def.class : canonicalClassNamesToClasses.get(canonicalTypeName); if (type != null) { return type; @@ -105,7 +105,7 @@ public static Class<?> canonicalTypeNameToType(String canonicalTypeName, Map From: Jason Tedor Date: Thu, 16 Aug 2018 11:32:35 -0400 Subject: [PATCH 16/46] Fix docs for fixed filename for heap dump path (#32882) The docs here incorrectly state that it is okay for a heap dump file to exist when heap dump path is configured to a fixed filename. This is incorrect: the JVM will fail to write the heap dump if a heap dump file already exists at the specified location (see the DumpWriter constructor DumpWriter::DumpWriter(const char* path) in the JVM source). --- .../setup/important-settings/heap-dump-path.asciidoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference/setup/important-settings/heap-dump-path.asciidoc b/docs/reference/setup/important-settings/heap-dump-path.asciidoc index b0d301b21d0b8..fb8c7ff35f0d0 100644 --- a/docs/reference/setup/important-settings/heap-dump-path.asciidoc +++ b/docs/reference/setup/important-settings/heap-dump-path.asciidoc @@ -8,8 +8,8 @@ distributions, and the `data` directory under the root of the Elasticsearch installation for the <> archive distributions). If this path is not suitable for receiving heap dumps, you should modify the entry `-XX:HeapDumpPath=...` in -<<jvm-options>>. If you specify a fixed filename instead -of a directory, the JVM will repeatedly use the same file; this is one -mechanism for preventing heap dumps from accumulating in the heap dump -path. Alternatively, you can configure a scheduled task via your OS to -remove heap dumps that are older than a configured age. +<<jvm-options>>. If you specify a directory, the JVM +will generate a filename for the heap dump based on the PID of the running +instance. If you specify a fixed filename instead of a directory, the file must +not exist when the JVM needs to perform a heap dump on an out of memory +exception; otherwise the heap dump will fail.
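A minimal jvm.options sketch of the two behaviors the corrected docs describe (the paths are illustrative, not Elasticsearch defaults):

# dump into a directory; the JVM derives the file name from the PID of the running instance
-XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=/var/lib/elasticsearch

# or dump to a fixed file, which must not already exist when the dump is written
# -XX:HeapDumpPath=/var/lib/elasticsearch/heapdump.hprof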
From d604b3e3a1aca100c4560b4d05f846148b76fef4 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Thu, 16 Aug 2018 17:18:51 +0100 Subject: [PATCH 17/46] Temporarily disabled ML BWC tests for backporting https://github.com/elastic/elasticsearch/pull/32816 --- .../test/mixed_cluster/40_ml_datafeed_crud.yml | 6 ++++++ .../rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml | 6 ++++++ .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ++++ 3 files changed, 16 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 0ec288f90973c..529e9e497cafe 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,9 @@ +--- +setup: + - skip: + version: "all" + reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" + --- "Test old cluster datafeed": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index c1317bdf3d660..b8cfcbcda4b1b 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,3 +1,9 @@ +--- +setup: + - skip: + version: "all" + reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" + --- "Put job and datafeed in old cluster": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 6b4c963dd533b..13e7289457a1b 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,4 +1,8 @@ setup: + - skip: + version: "all" + reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" + - do: cluster.health: wait_for_status: green From 62559d2b3c26c5617049c1c328d0e5522680e96f Mon Sep 17 00:00:00 2001 From: Ed Savage <32410745+edsavage@users.noreply.github.com> Date: Thu, 16 Aug 2018 18:23:26 +0100 Subject: [PATCH 18/46] Re enable ml bwc tests (#32916) [ML] Re-enabling BWC tests Re-enable BWC tests for ML now that #32816 has been backported to 6.x --- .../xpack/core/ml/job/config/AnalysisConfig.java | 10 ++++------ .../test/mixed_cluster/30_ml_jobs_crud.yml | 6 ------ .../test/mixed_cluster/40_ml_datafeed_crud.yml | 6 ------ .../rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml | 6 ------ .../test/old_cluster/40_ml_datafeed_crud.yml | 6 ------ .../test/upgraded_cluster/30_ml_jobs_crud.yml | 4 ---- .../test/upgraded_cluster/40_ml_datafeed_crud.yml | 4 ---- 7 files changed, 4 insertions(+), 38 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index 135ad755359e6..9068ffda4de55 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -162,9 +162,8 @@ public AnalysisConfig(StreamInput in) throws IOException { } // BWC for removed per-partition normalization - // Version check is temporarily against the latest to satisfy CI tests - // TODO change to V_6_5_0 after successful backport to 6.x - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + // TODO Remove in 7.0.0 + if (in.getVersion().before(Version.V_6_5_0)) { in.readBoolean(); } } @@ -197,9 +196,8 @@ public void writeTo(StreamOutput out) throws IOException { } // BWC for removed per-partition normalization - // Version check is temporarily against the latest to satisfy CI tests - // TODO change to V_6_5_0 after successful backport to 6.x - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + // TODO Remove in 7.0.0 + if (out.getVersion().before(Version.V_6_5_0)) { out.writeBoolean(false); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index cb036b9d13a26..ba0f4d5091e0f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,9 +1,3 @@ ---- -setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - --- "Test get old cluster job": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 529e9e497cafe..0ec288f90973c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,9 +1,3 @@ ---- -setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - --- "Test old cluster datafeed": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml index 061a242a78d30..3a3334f6907e9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml @@ -1,9 +1,3 @@ ---- -setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - --- "Put job on the old cluster and post some data": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index b8cfcbcda4b1b..c1317bdf3d660 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,9 +1,3 @@ ---- -setup: - - skip: 
- version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - --- "Put job and datafeed in old cluster": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 1da16e79cbe61..bb47524b41d87 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - - do: cluster.health: wait_for_status: green diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 13e7289457a1b..6b4c963dd533b 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -1,8 +1,4 @@ setup: - - skip: - version: "all" - reason: "Temporarily disabled while backporting https://github.com/elastic/elasticsearch/pull/32816" - - do: cluster.health: wait_for_status: green From 0876630b3069c2d37005c21e06f33980a4541422 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 16 Aug 2018 16:25:34 -0400 Subject: [PATCH 19/46] Guard against null in email admin watches (#32923) The Kibana settings docs that these watches rely on can sometimes contain no xpack settings. When this is the case, we will end up with a null pointer exception in the script. We need to guard against in these scripts so this commit does that. --- .../monitoring/watches/elasticsearch_cluster_status.json | 2 +- .../main/resources/monitoring/watches/elasticsearch_nodes.json | 2 +- .../monitoring/watches/elasticsearch_version_mismatch.json | 2 +- .../resources/monitoring/watches/kibana_version_mismatch.json | 2 +- .../resources/monitoring/watches/logstash_version_mismatch.json | 2 +- .../resources/monitoring/watches/xpack_license_expiration.json | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json index c0a13ea63a640..e1f418d5a8d71 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json @@ -145,7 +145,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json index a6bf7b6145ce8..5c0cb7f55b4e5 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json @@ -151,7 +151,7 @@ }, "transform": { "script": { - "source": "void formatResults(StringBuilder message, String type, Map typeMap) {if (typeMap.empty == false) {message.append(' Node');if (typeMap.size() != 1) {message.append('s were');} else {message.append(' was');}message.append(' ').append(type).append(' [').append(typeMap.size()).append(']: ').append(typeMap.values().stream().collect(Collectors.joining(', ', '[', ']'))).append('.');}}ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;def clusterState = ctx.payload.check.hits.hits[0]._source.cluster_state;def persistentUuidToName = [:];def latestNodes = clusterState.nodes;def ephemeralUuidToPersistentUuid = [:];def payload = ['timestamp': ctx.execution_time,'updated_timestamp': ctx.execution_time,'resolved_timestamp': ctx.execution_time,'metadata': ctx.metadata.xpack,'prefix': 'Elasticsearch cluster nodes have changed!','nodes': ['hash': clusterState.nodes_hash,'added': persistentUuidToName,'removed': [:],'restarted': [:]]];for (def latestNode : latestNodes.entrySet()) {persistentUuidToName[latestNode.key] = latestNode.value.name;ephemeralUuidToPersistentUuid[latestNode.value.ephemeral_id] = latestNode.key;}def previousNodes = ctx.payload.check.hits.hits[1]._source.cluster_state.nodes;def previousPersistentUuidToName = [:];for (def previousNode : previousNodes.entrySet()){if (persistentUuidToName.containsKey(previousNode.key) == false){payload.nodes.removed[previousNode.key] = previousNode.value.name;}else{if (ephemeralUuidToPersistentUuid.containsKey(previousNode.value.ephemeral_id) == false) {payload.nodes.restarted[previousNode.key] = persistentUuidToName[previousNode.key];}persistentUuidToName.remove(previousNode.key);}}StringBuilder message = new StringBuilder();formatResults(message, 'removed', payload.nodes.removed);formatResults(message, 'added', payload.nodes.added);formatResults(message, 'restarted', payload.nodes.restarted);payload.message = message.toString().trim();return payload;" + "source": "void formatResults(StringBuilder message, String type, Map typeMap) {if (typeMap.empty == false) {message.append(' Node');if (typeMap.size() != 1) {message.append('s were');} else {message.append(' was');}message.append(' ').append(type).append(' [').append(typeMap.size()).append(']: ').append(typeMap.values().stream().collect(Collectors.joining(', ', '[', ']'))).append('.');}}ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;def clusterState = ctx.payload.check.hits.hits[0]._source.cluster_state;def persistentUuidToName = [:];def latestNodes = clusterState.nodes;def ephemeralUuidToPersistentUuid = [:];def payload = ['timestamp': ctx.execution_time,'updated_timestamp': ctx.execution_time,'resolved_timestamp': ctx.execution_time,'metadata': ctx.metadata.xpack,'prefix': 'Elasticsearch cluster nodes have changed!','nodes': ['hash': clusterState.nodes_hash,'added': persistentUuidToName,'removed': [:],'restarted': [:]]];for (def latestNode : latestNodes.entrySet()) {persistentUuidToName[latestNode.key] = latestNode.value.name;ephemeralUuidToPersistentUuid[latestNode.value.ephemeral_id] = latestNode.key;}def previousNodes = ctx.payload.check.hits.hits[1]._source.cluster_state.nodes;def previousPersistentUuidToName = [:];for (def previousNode : previousNodes.entrySet()){if (persistentUuidToName.containsKey(previousNode.key) == false){payload.nodes.removed[previousNode.key] = previousNode.value.name;}else{if (ephemeralUuidToPersistentUuid.containsKey(previousNode.value.ephemeral_id) == false) {payload.nodes.restarted[previousNode.key] = persistentUuidToName[previousNode.key];}persistentUuidToName.remove(previousNode.key);}}StringBuilder message = new StringBuilder();formatResults(message, 'removed', payload.nodes.removed);formatResults(message, 'added', payload.nodes.added);formatResults(message, 'restarted', payload.nodes.restarted);payload.message = message.toString().trim();return payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json index 7e18c981f0f1f..051a3a9d40921 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json @@ -141,7 +141,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {def versions = new ArrayList(ctx.payload.check.hits.hits[0]._source.cluster_stats.nodes.versions);Collections.sort(versions);versionMessage = 'Versions: [' + String.join(', ', versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Elasticsearch.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {def versions = new ArrayList(ctx.payload.check.hits.hits[0]._source.cluster_stats.nodes.versions);Collections.sort(versions);versionMessage = 'Versions: [' + String.join(', ', versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Elasticsearch.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json index bf2da3ffb1ddd..b2acba610e141 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json @@ -161,7 +161,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Kibana.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Kibana.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json index 71a0cfd46bfd1..cf1fdde606c7a 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json @@ -161,7 +161,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Logstash.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def versionMessage = null;if (ctx.vars.fails_check) {versionMessage = 'Versions: [' + String.join(', ', ctx.vars.versions) + '].';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check) {ctx.payload.message = versionMessage;} else {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster is running with multiple versions of Logstash.', 'message': versionMessage, 'metadata': ctx.metadata.xpack ];}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json index a05198a15eb9c..7eb0d59167dbd 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json @@ -134,7 +134,7 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) { alertMessage = 'Update your license.';} if (ctx.vars.not_resolved) { ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) { ctx.payload.resolved_timestamp = ctx.execution_time;} } else { ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.', 'message': alertMessage, 'metadata': ctx.metadata.xpack ];} if (ctx.vars.fails_check) { ctx.payload.metadata.time = ctx.vars.expiry.toString();} ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? 
ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) { alertMessage = 'Update your license.';} if (ctx.vars.not_resolved) { ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) { ctx.payload.resolved_timestamp = ctx.execution_time;} } else { ctx.payload = [ 'timestamp': ctx.execution_time, 'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.', 'message': alertMessage, 'metadata': ctx.metadata.xpack ];} if (ctx.vars.fails_check) { ctx.payload.metadata.time = ctx.vars.expiry.toString();} ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { From cbf160a4e6c6c13f63f2d0d2dd1adf006fc70d97 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 16 Aug 2018 17:36:58 -0700 Subject: [PATCH 20/46] For filters aggs, make sure that rewrites preserve other_bucket. (#32921) --- .../bucket/filter/FiltersAggregationBuilder.java | 5 ++++- .../search/aggregations/bucket/FiltersTests.java | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index e35bf376aae4d..810126e851251 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -209,7 +209,10 @@ protected AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) th } } if (changed) { - return new FiltersAggregationBuilder(getName(), rewrittenFilters, this.keyed); + FiltersAggregationBuilder rewritten = new FiltersAggregationBuilder(getName(), rewrittenFilters, this.keyed); + rewritten.otherBucket(otherBucket); + rewritten.otherBucketKey(otherBucketKey); + return rewritten; } else { return this; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 327a717f05c52..bdfdd4d028f0f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -178,4 +178,18 @@ public void testRewrite() throws IOException { assertSame(rewritten, rewritten.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L))); } + + public void testRewritePreservesOtherBucket() throws IOException { + FiltersAggregationBuilder originalFilters = new FiltersAggregationBuilder("my-agg", new BoolQueryBuilder()); + originalFilters.otherBucket(randomBoolean()); + originalFilters.otherBucketKey(randomAlphaOfLength(10)); + + AggregationBuilder rewritten = originalFilters.rewrite(new QueryRewriteContext(xContentRegistry(), + null, null, () -> 0L)); + assertThat(rewritten, instanceOf(FiltersAggregationBuilder.class)); + + FiltersAggregationBuilder rewrittenFilters = (FiltersAggregationBuilder) rewritten; + assertEquals(originalFilters.otherBucket(), 
rewrittenFilters.otherBucket()); + assertEquals(originalFilters.otherBucketKey(), rewrittenFilters.otherBucketKey()); + } } From 1136a9583792ae20ffc2d6c58324ce4e04962976 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Thu, 16 Aug 2018 21:16:06 -0600 Subject: [PATCH 21/46] Security: remove put privilege API (#32879) This commit removes the put privilege API in favor of having a single API to create and update privileges. If we see the need to have an API like this in the future we can always add it back. --- .../PutPrivilegesRequestBuilder.java | 27 ---------- .../core/security/client/SecurityClient.java | 6 --- .../xpack/security/Security.java | 2 - .../privilege/RestPutPrivilegeAction.java | 49 ------------------- .../privilege/RestPutPrivilegesAction.java | 2 + .../PutPrivilegesRequestBuilderTests.java | 30 ------------ .../api/xpack.security.put_privilege.json | 33 ------------- .../api/xpack.security.put_privileges.json | 2 +- .../test/privileges/10_basic.yml | 42 ++++++++-------- .../authz/40_condtional_cluster_priv.yml | 40 +++++++++------ 10 files changed, 49 insertions(+), 184 deletions(-) delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java delete mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java index b8c2685d28a11..562e22a1eb925 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; @@ -34,32 +33,6 @@ public PutPrivilegesRequestBuilder(ElasticsearchClient client, PutPrivilegesActi super(client, action, new PutPrivilegesRequest()); } - /** - * Populate the put privileges request using the given source, application name and privilege name - * The source must contain a single privilege object which matches the application and privilege names. 
- */ - public PutPrivilegesRequestBuilder source(String applicationName, String expectedName, - BytesReference source, XContentType xContentType) - throws IOException { - Objects.requireNonNull(xContentType); - // EMPTY is ok here because we never call namedObject - try (InputStream stream = source.streamInput(); - XContentParser parser = xContentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.START_OBJECT) { - final ApplicationPrivilegeDescriptor privilege = parsePrivilege(parser, applicationName, expectedName); - this.request.setPrivileges(Collections.singleton(privilege)); - } else { - throw new ElasticsearchParseException("expected an object but found {} instead", token); - } - } - return this; - } - ApplicationPrivilegeDescriptor parsePrivilege(XContentParser parser, String applicationName, String privilegeName) throws IOException { ApplicationPrivilegeDescriptor privilege = ApplicationPrivilegeDescriptor.parse(parser, applicationName, privilegeName, false); checkPrivilegeName(privilege, applicationName, privilegeName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java index e1d3a2db8e952..d3cc60194f2cf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java @@ -292,12 +292,6 @@ public GetPrivilegesRequestBuilder prepareGetPrivileges(String applicationName, return new GetPrivilegesRequestBuilder(client, GetPrivilegesAction.INSTANCE).application(applicationName).privileges(privileges); } - public PutPrivilegesRequestBuilder preparePutPrivilege(String applicationName, String privilegeName, - BytesReference bytesReference, XContentType xContentType) throws IOException { - return new PutPrivilegesRequestBuilder(client, PutPrivilegesAction.INSTANCE) - .source(applicationName, privilegeName, bytesReference, xContentType); - } - public PutPrivilegesRequestBuilder preparePutPrivileges(BytesReference bytesReference, XContentType xContentType) throws IOException { return new PutPrivilegesRequestBuilder(client, PutPrivilegesAction.INSTANCE).source(bytesReference, xContentType); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index d31ffae13f245..857b343b753c4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -191,7 +191,6 @@ import org.elasticsearch.xpack.security.rest.action.oauth2.RestInvalidateTokenAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestDeletePrivilegesAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestGetPrivilegesAction; -import org.elasticsearch.xpack.security.rest.action.privilege.RestPutPrivilegeAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestPutPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.realm.RestClearRealmCacheAction; import 
org.elasticsearch.xpack.security.rest.action.role.RestClearRolesCacheAction; @@ -762,7 +761,6 @@ public List getRestHandlers(Settings settings, RestController restC new RestSamlInvalidateSessionAction(settings, restController, getLicenseState()), new RestGetPrivilegesAction(settings, restController, getLicenseState()), new RestPutPrivilegesAction(settings, restController, getLicenseState()), - new RestPutPrivilegeAction(settings, restController, getLicenseState()), new RestDeletePrivilegesAction(settings, restController, getLicenseState()) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java deleted file mode 100644 index 6c3ef8e70fabf..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegeAction.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security.rest.action.privilege; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.core.security.action.privilege.PutPrivilegesRequestBuilder; -import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; -import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestRequest.Method.PUT; - -/** - * Rest endpoint to add one or more {@link ApplicationPrivilege} objects to the security index - */ -public class RestPutPrivilegeAction extends SecurityBaseRestHandler { - - public RestPutPrivilegeAction(Settings settings, RestController controller, XPackLicenseState licenseState) { - super(settings, licenseState); - controller.registerHandler(PUT, "/_xpack/security/privilege/{application}/{privilege}", this); - controller.registerHandler(POST, "/_xpack/security/privilege/{application}/{privilege}", this); - } - - @Override - public String getName() { - return "xpack_security_put_privilege_action"; - } - - @Override - public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - final String application = request.param("application"); - final String privilege = request.param("privilege"); - PutPrivilegesRequestBuilder requestBuilder = new SecurityClient(client) - .preparePutPrivilege(application, privilege, request.requiredContent(), request.getXContentType()) - .setRefreshPolicy(request.param("refresh")); - - return RestPutPrivilegesAction.execute(requestBuilder); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java index eb1104c9bc036..dc565e3f87339 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/privilege/RestPutPrivilegesAction.java @@ -29,6 +29,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; /** * Rest endpoint to add one or more {@link ApplicationPrivilege} objects to the security index @@ -37,6 +38,7 @@ public class RestPutPrivilegesAction extends SecurityBaseRestHandler { public RestPutPrivilegesAction(Settings settings, RestController controller, XPackLicenseState licenseState) { super(settings, licenseState); + controller.registerHandler(PUT, "/_xpack/security/privilege/", this); controller.registerHandler(POST, "/_xpack/security/privilege/", this); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java index db0548c03ef30..2ece398d3d19f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilderTests.java @@ -52,36 +52,6 @@ private ApplicationPrivilegeDescriptor descriptor(String app, String name, Strin return new ApplicationPrivilegeDescriptor(app, name, Sets.newHashSet(actions), Collections.emptyMap()); } - public void testBuildRequestFromJsonObject() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); - builder.source("foo", "read", new BytesArray( - "{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }" - ), XContentType.JSON); - final List privileges = builder.request().getPrivileges(); - assertThat(privileges, iterableWithSize(1)); - assertThat(privileges, contains(descriptor("foo", "read", "data:/read/*", "admin:/read/*"))); - } - - public void testPrivilegeNameValidationOfSingleElement() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - builder.source("foo", "write", new BytesArray( - "{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }" - ), XContentType.JSON)); - assertThat(exception.getMessage(), containsString("write")); - assertThat(exception.getMessage(), containsString("read")); - } - - public void testApplicationNameValidationOfSingleElement() throws Exception { - final PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - builder.source("bar", "read", new BytesArray( - "{ \"application\":\"foo\", \"name\":\"read\", \"actions\":[ \"data:/read/*\", \"admin:/read/*\" ] }" - ), XContentType.JSON)); - assertThat(exception.getMessage(), containsString("foo")); - assertThat(exception.getMessage(), containsString("bar")); - } - public void testPrivilegeNameValidationOfMultipleElement() throws Exception { final 
PutPrivilegesRequestBuilder builder = new PutPrivilegesRequestBuilder(null, PutPrivilegesAction.INSTANCE); final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json deleted file mode 100644 index 3d453682c6431..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privilege.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack.security.put_privilege": { - "documentation": "TODO", - "methods": [ "POST", "PUT" ], - "url": { - "path": "/_xpack/security/privilege/{application}/{name}", - "paths": [ "/_xpack/security/privilege/{application}/{name}" ], - "parts": { - "application": { - "type" : "string", - "description" : "Application name", - "required" : true - }, - "name": { - "type" : "string", - "description" : "Privilege name", - "required" : true - } - }, - "params": { - "refresh": { - "type" : "enum", - "options": ["true", "false", "wait_for"], - "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." - } - } - }, - "body": { - "description" : "The privilege to add", - "required" : true - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json index 07eb541715810..312db3c9a1821 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_privileges.json @@ -1,7 +1,7 @@ { "xpack.security.put_privileges": { "documentation": "TODO", - "methods": [ "POST" ], + "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/privilege/", "paths": [ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml index e8dddf2153576..30fa3a8d07840 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/10_basic.yml @@ -30,24 +30,26 @@ teardown: ignore: 404 --- "Test put and get privileges": - # Single privilege, with names in URL + # Single privilege - do: - xpack.security.put_privilege: - application: app - name: p1 + xpack.security.put_privileges: body: > { - "application": "app", - "name": "p1", - "actions": [ "data:read/*" , "action:login" ], - "metadata": { - "key1" : "val1a", - "key2" : "val2a" + "app": { + "p1": { + "application": "app", + "name": "p1", + "actions": [ "data:read/*" , "action:login" ], + "metadata": { + "key1" : "val1a", + "key2" : "val2a" + } + } } } - match: { "app.p1" : { created: true } } - # Multiple privileges, no names in URL + # Multiple privileges - do: xpack.security.put_privileges: body: > @@ -84,18 +86,18 @@ teardown: - match: { "app.p3" : { created: true } } - match: { "app2.p1" : { created: true } } - # Update existing privilege, with names in URL + # Update existing privilege - do: - xpack.security.put_privilege: - application: app - name: p1 + xpack.security.put_privileges: body: > { - "application": "app", - "name": "p1", - "actions": [ "data:read/*" , "action:login" ], - 
"metadata": { - "key3" : "val3" + "app": { + "p1": { + "actions": [ "data:read/*" , "action:login" ], + "metadata": { + "key3" : "val3" + } + } } } - match: { "app.p1" : { created: false } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml index b3a1e22069083..a7d3fabd2a282 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/40_condtional_cluster_priv.yml @@ -31,21 +31,25 @@ setup: } - do: - xpack.security.put_privilege: - application: app-allow - name: read + xpack.security.put_privileges: body: > { - "actions": [ "data:read/*" ] + "app-allow": { + "read": { + "actions": [ "data:read/*" ] + } + } } - do: - xpack.security.put_privilege: - application: app_deny - name: read + xpack.security.put_privileges: body: > { - "actions": [ "data:read/*" ] + "app-deny": { + "read": { + "actions": [ "data:read/*" ] + } + } } --- @@ -82,12 +86,14 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.security.put_privilege: - application: app - name: read + xpack.security.put_privileges: body: > { - "actions": [ "data:read/*" ] + "app": { + "read": { + "actions": [ "data:read/*" ] + } + } } - match: { "app.read" : { created: true } } @@ -112,12 +118,14 @@ teardown: "Test put application privileges when not allowed": - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.security.put_privilege: - application: app_deny - name: write + xpack.security.put_privileges: body: > { - "actions": [ "data:write/*" ] + "app_deny": { + "write": { + "actions": [ "data:write/*" ] + } + } } catch: forbidden From d16562eab5c514f2caa9092b73c53e3a82f7d66d Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 17 Aug 2018 09:41:39 +0300 Subject: [PATCH 22/46] RFC: Test that example plugins build stand-alone (#32235) Add tests for build-tools to make sure example plugins build stand-alone using it. This will catch issues such as referencing files from the buildSrc directly, breaking external uses of build-tools. 
--- build.gradle | 8 + buildSrc/build.gradle | 13 ++ .../elasticsearch/gradle/BuildPlugin.groovy | 3 +- .../gradle/plugin/PluginBuildPlugin.groovy | 2 - .../plugin/PluginPropertiesExtension.groovy | 30 +++- .../gradle/plugin/PluginPropertiesTask.groovy | 1 - .../gradle/test/RestIntegTestTask.groovy | 11 +- .../gradle/BuildExamplePluginsIT.java | 164 ++++++++++++++++++ plugins/examples/custom-settings/build.gradle | 3 +- .../examples/painless-whitelist/build.gradle | 5 +- plugins/examples/rescore/build.gradle | 4 +- plugins/examples/rest-handler/build.gradle | 5 +- .../script-expert-scoring/build.gradle | 4 +- 13 files changed, 233 insertions(+), 20 deletions(-) create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java diff --git a/build.gradle b/build.gradle index 3674e0a540bf8..0df5b97ae4a26 100644 --- a/build.gradle +++ b/build.gradle @@ -87,8 +87,15 @@ subprojects { } } } + repositories { + maven { + name = 'localTest' + url = "${rootProject.buildDir}/local-test-repo" + } + } } } + plugins.withType(BuildPlugin).whenPluginAdded { project.licenseFile = project.rootProject.file('licenses/APACHE-LICENSE-2.0.txt') project.noticeFile = project.rootProject.file('NOTICE.txt') @@ -228,6 +235,7 @@ subprojects { "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level', "org.elasticsearch.client:test:${version}": ':client:test', "org.elasticsearch.client:transport:${version}": ':client:transport', + "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip', diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 5775b2b6323f1..967c2e27ee8df 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -162,11 +162,24 @@ if (project != rootProject) { // it's fine as we run them as part of :buildSrc test.enabled = false task integTest(type: Test) { + // integration test requires the local testing repo for example plugin builds + dependsOn project.rootProject.allprojects.collect { + it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} + } exclude "**/*Tests.class" include "**/*IT.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) + // tell BuildExamplePluginsIT where to find the example plugins + systemProperty ( + 'test.build-tools.plugin.examples', + files( + project(':example-plugins').subprojects.collect { it.projectDir } + ).asPath, + ) + systemProperty 'test.local-test-repo-path', "${rootProject.buildDir}/local-test-repo" + systemProperty 'test.lucene-snapshot-revision', (versions.lucene =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] } check.dependsOn(integTest) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 306a2bcb58bd1..f3f014f0e8aa4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -554,7 +554,7 @@ class BuildPlugin implements Plugin { project.publishing { publications { nebula(MavenPublication) { - artifact project.tasks.shadowJar + artifacts = [ 
project.tasks.shadowJar ] artifactId = project.archivesBaseName /* * Configure the pom to include the "shadow" as compile dependencies @@ -584,7 +584,6 @@ class BuildPlugin implements Plugin { } } } - } /** Adds compiler settings to the project */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 00f178fda9c9f..6f42e41beaa1b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -25,7 +25,6 @@ import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask import org.gradle.api.InvalidUserDataException -import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.XmlProvider @@ -39,7 +38,6 @@ import java.nio.file.Path import java.nio.file.StandardCopyOption import java.util.regex.Matcher import java.util.regex.Pattern - /** * Encapsulates build configuration for an Elasticsearch plugin. */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy index 6cfe44c806833..c250d7695a832 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.plugin import org.gradle.api.Project import org.gradle.api.tasks.Input +import org.gradle.api.tasks.InputFile /** * A container for plugin properties that will be written to the plugin descriptor, for easy @@ -55,18 +56,39 @@ class PluginPropertiesExtension { boolean requiresKeystore = false /** A license file that should be included in the built plugin zip. */ - @Input - File licenseFile = null + private File licenseFile = null /** * A notice file that should be included in the built plugin zip. This will be * extended with notices from the {@code licenses/} directory. */ - @Input - File noticeFile = null + private File noticeFile = null + + Project project = null PluginPropertiesExtension(Project project) { name = project.name version = project.version + this.project = project + } + + @InputFile + File getLicenseFile() { + return licenseFile + } + + void setLicenseFile(File licenseFile) { + project.ext.licenseFile = licenseFile + this.licenseFile = licenseFile + } + + @InputFile + File getNoticeFile() { + return noticeFile + } + + void setNoticeFile(File noticeFile) { + project.ext.noticeFile = noticeFile + this.noticeFile = noticeFile } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 8e913153f05ad..9588f77a71db7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -23,7 +23,6 @@ import org.gradle.api.InvalidUserDataException import org.gradle.api.Task import org.gradle.api.tasks.Copy import org.gradle.api.tasks.OutputFile - /** * Creates a plugin descriptor. 
*/ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index d2101c48aabdc..2838849981a1b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -31,6 +31,7 @@ import org.gradle.api.provider.Provider import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskState +import org.gradle.plugins.ide.idea.IdeaPlugin import java.nio.charset.StandardCharsets import java.nio.file.Files @@ -243,10 +244,12 @@ public class RestIntegTestTask extends DefaultTask { } } } - project.idea { - module { - if (scopes.TEST != null) { - scopes.TEST.plus.add(project.configurations.restSpec) + if (project.plugins.hasPlugin(IdeaPlugin)) { + project.idea { + module { + if (scopes.TEST != null) { + scopes.TEST.plus.add(project.configurations.restSpec) + } } } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java new file mode 100644 index 0000000000000..9b63d6f45e06b --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -0,0 +1,164 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.commons.io.FileUtils; +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.GradleRunner; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class BuildExamplePluginsIT extends GradleIntegrationTestCase { + + private static List<File> EXAMPLE_PLUGINS = Collections.unmodifiableList( + Arrays.stream( + Objects.requireNonNull(System.getProperty("test.build-tools.plugin.examples")) + .split(File.pathSeparator) + ).map(File::new).collect(Collectors.toList()) + ); + + @Rule + public TemporaryFolder tmpDir = new TemporaryFolder(); + + public final File examplePlugin; + + public BuildExamplePluginsIT(File examplePlugin) { + this.examplePlugin = examplePlugin; + } + + @BeforeClass + public static void assertProjectsExist() { + assertEquals( + EXAMPLE_PLUGINS, + EXAMPLE_PLUGINS.stream().filter(File::exists).collect(Collectors.toList()) + ); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + return EXAMPLE_PLUGINS + .stream() + .map(each -> new Object[] {each}) + .collect(Collectors.toList()); + } + + public void testCurrentExamplePlugin() throws IOException { + FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot()); + // just get rid of deprecation warnings + Files.write( + getTempPath("settings.gradle"), + "enableFeaturePreview('STABLE_PUBLISHING')\n".getBytes(StandardCharsets.UTF_8) + ); + + adaptBuildScriptForTest(); + + Files.write( + tmpDir.newFile("NOTICE.txt").toPath(), + "dummy test notice".getBytes(StandardCharsets.UTF_8) + ); + + GradleRunner.create() + .withProjectDir(tmpDir.getRoot()) + .withArguments("clean", "check", "-s", "-i", "--warning-mode=all", "--scan") + .withPluginClasspath() + .build(); + } + + private void adaptBuildScriptForTest() throws IOException { + // Add the local repo as a build script URL so we can pull in build-tools and apply the plugin under test + // + is ok because we have no other repo and just want to pick up latest + writeBuildScript( + "buildscript {\n" + + " repositories {\n" + + " maven {\n" + + " url = '" + getLocalTestRepoPath() + "'\n" + + " }\n" + + " }\n" + + " dependencies {\n" + + " classpath \"org.elasticsearch.gradle:build-tools:+\"\n" + + " }\n" + + "}\n" + ); + // get the original file + Files.readAllLines(getTempPath("build.gradle"), StandardCharsets.UTF_8) + .stream() + .map(line -> line + "\n") + .forEach(this::writeBuildScript); + // Add a repositories section to be able to resolve dependencies + String luceneSnapshotRepo = ""; + String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision"); + if (luceneSnapshotRevision != null) { + luceneSnapshotRepo = " maven {\n" + + " url \"http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" + + " }\n"; + } + writeBuildScript("\n" + + "repositories {\n" + + " maven {\n" + + " url \"" + getLocalTestRepoPath() + "\"\n" + + " }\n" + + luceneSnapshotRepo + + "}\n" + ); + Files.delete(getTempPath("build.gradle")); + Files.move(getTempPath("build.gradle.new"), 
getTempPath("build.gradle")); + System.err.print("Generated build script is:"); + Files.readAllLines(getTempPath("build.gradle")).forEach(System.err::println); + } + + private Path getTempPath(String fileName) { + return new File(tmpDir.getRoot(), fileName).toPath(); + } + + private Path writeBuildScript(String script) { + try { + Path path = getTempPath("build.gradle.new"); + return Files.write( + path, + script.getBytes(StandardCharsets.UTF_8), + Files.exists(path) ? StandardOpenOption.APPEND : StandardOpenOption.CREATE_NEW + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private String getLocalTestRepoPath() { + String property = System.getProperty("test.local-test-repo-path"); + Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + File file = new File(property); + assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); + return file.getAbsolutePath(); + } + +} diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index e0e728cec2427..3caf29c8513b5 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'custom-settings' description 'An example plugin showing how to register custom settings' classname 'org.elasticsearch.example.customsettings.ExampleCustomSettingsPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } integTestCluster { diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index ef1ca7d741e9a..cb2aeb82e9d01 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -16,7 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { @@ -24,10 +23,12 @@ esplugin { description 'An example whitelisting additional classes and methods in painless' classname 'org.elasticsearch.example.painlesswhitelist.MyWhitelistPlugin' extendedPlugins = ['lang-painless'] + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } dependencies { - compileOnly project(':modules:lang-painless') + compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}" } if (System.getProperty('tests.distribution') == null) { diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index 4adeb0c721baf..cdecd760c81e8 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -16,11 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'example-rescore' description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' classname 'org.elasticsearch.example.rescore.ExampleRescorePlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } + diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index cfe84e6a45a93..eff2fd1b6c6e4 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'rest-handler' description 'An example plugin showing how to register a REST handler' classname 'org.elasticsearch.example.resthandler.ExampleRestHandlerPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } // No unit tests in this example @@ -40,4 +41,4 @@ integTestCluster { } integTestRunner { systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" -} +} \ No newline at end of file diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index 7c602d9bc027d..e9da62acdcff4 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -16,13 +16,15 @@ * specific language governing permissions and limitations * under the License. */ - apply plugin: 'elasticsearch.esplugin' esplugin { name 'script-expert-scoring' description 'An example script engine to use low level Lucene internals for expert scoring' classname 'org.elasticsearch.example.expertscript.ExpertScriptPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } test.enabled = false + From 148a76f0c7498ca55c9b56c605578fdb04bded30 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 17 Aug 2018 11:14:58 +0300 Subject: [PATCH 23/46] Fix failing BuildExamplePluginsIT test --- plugins/examples/custom-suggester/build.gradle | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index b36d5cd218d27..977e467391d8b 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -23,6 +23,8 @@ esplugin { name 'custom-suggester' description 'An example plugin showing how to write and register a custom suggester' classname 'org.elasticsearch.example.customsuggester.CustomSuggesterPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } integTestCluster { @@ -30,4 +32,4 @@ } // this plugin has no unit tests, only rest tests -tasks.test.enabled = false \ No newline at end of file +tasks.test.enabled = false From efdad7d5fc32b0887a6fa2f34b259dfb566cf77b Mon Sep 17 00:00:00 2001 From: JeffSaxeVA <42005772+JeffSaxeVA@users.noreply.github.com> Date: Fri, 17 Aug 2018 04:56:06 -0400 Subject: [PATCH 24/46] [DOCS] Add "remove a tag" script logic as an example (#32556) It took me quite a while of online searching and experimenting to realize the asymmetry between the function calls for adding to a list versus removing from one, like the "tags" list! 
I realize we cannot give examples for every single thing the user wants to do in Painless, but this is such a common use case (removing a tag from a single doc, or from a set of docs with Update-By-Query) that I believe it ought to be demonstrated immediately after the "add a tag" example. We have an example of removing an entire document field, but not removing one element of a list (a multi-valued field). Also, a minor grammar fix: I have added an apostrophe to the word "its" in the accompanying text of the example just above. --- docs/reference/docs/update.asciidoc | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 7ba7e2da63369..1cfc122bee402 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -47,7 +47,7 @@ POST test/_doc/1/_update // TEST[continued] We can add a tag to the list of tags (note, if the tag exists, it -will still add it, since its a list): +will still add it, since it's a list): [source,js] -------------------------------------------------- @@ -65,6 +65,28 @@ POST test/_doc/1/_update // CONSOLE // TEST[continued] +We can remove a tag from the list of tags. Note that the Painless function to +`remove` a tag takes as its parameter the array index of the element you wish +to remove, so you need a bit more logic to locate it while avoiding a runtime +error. Note that if the tag was present more than once in the list, this will +remove only one occurrence of it: + +[source,js] +-------------------------------------------------- +POST test/_doc/1/_update +{ + "script" : { + "source": "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }", + "lang": "painless", + "params" : { + "tag" : "blue" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + In addition to `_source`, the following variables are available through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing` and `_now` (the current timestamp). @@ -172,7 +194,7 @@ the request was ignored. "_index": "test", "_type": "_doc", "_id": "1", - "_version": 6, + "_version": 7, "result": "noop" } -------------------------------------------------- From dd5a5aab88edc88f0f7949ecda67432efdcfc616 Mon Sep 17 00:00:00 2001 From: JB Nizet Date: Fri, 17 Aug 2018 10:59:26 +0200 Subject: [PATCH 25/46] Fix allowed value for HighlighterBuilder encoder in javadocs (#32780) Relates to #32745 --- .../search/fetch/subphase/highlight/HighlightBuilder.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index 049de439ac750..9483e76d072a8 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -220,7 +220,7 @@ public HighlightBuilder tagsSchema(String schemaName) { /** * Set encoder for the highlighting - * are {@code styled} and {@code default}. + * are {@code html} and {@code default}. 
* * @param encoder name */ From ae38cfbaec24ed4ab5a28f091e2c9173ab06ea58 Mon Sep 17 00:00:00 2001 From: markwalkom Date: Fri, 17 Aug 2018 19:03:09 +1000 Subject: [PATCH 26/46] [DOCS] Update getting-started.asciidoc (#29518) Highlighted that you can change shard counts using `_shrink` and `_split`. --- docs/reference/getting-started.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b89021e1cfe59..3e44b2c41f67e 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -93,7 +93,8 @@ Replication is important for two primary reasons: To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). -The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact. + +The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically at any time. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs; however, this is not a trivial task, and pre-planning for the correct number of shards is the optimal approach. By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. From 76aba8ad7bf4a5c8bce78fbefdfbd9d987294e92 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 17 Aug 2018 10:57:00 +0100 Subject: [PATCH 27/46] HLRC: Move ML request converters into their own class (#32906) --- .../client/MLRequestConverters.java | 78 ++++++++++++++++ .../client/MachineLearningClient.java | 12 +-- .../client/RequestConverters.java | 45 +--------- .../client/MLRequestConvertersTests.java | 90 +++++++++++++++++++ .../client/RequestConvertersTests.java | 29 ------ .../xpack/ml/job/config/AnalysisConfig.java | 4 + .../xpack/ml/job/config/Detector.java | 4 + .../protocol/xpack/ml/job/config/Job.java | 6 +- 8 files changed, 188 insertions(+), 80 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java new file mode 100644 index 0000000000000..e26a4c629a0b0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; +import org.elasticsearch.protocol.xpack.ml.PutJobRequest; + +import java.io.IOException; + +import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; +import static org.elasticsearch.client.RequestConverters.createEntity; + +final class MLRequestConverters { + + private MLRequestConverters() {} + + static Request putJob(PutJobRequest putJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(putJobRequest.getJob().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request openJob(OpenJobRequest openJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(openJobRequest.getJobId()) + .addPathPartAsIs("_open") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setJsonEntity(openJobRequest.toString()); + return request; + } + + static Request deleteJob(DeleteJobRequest deleteJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(deleteJobRequest.getJobId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + params.putParam("force", Boolean.toString(deleteJobRequest.isForce())); + + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 2e7914e64abdb..32b6cd6cf2c67 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -57,7 +57,7 @@ public final class MachineLearningClient { */ public PutJobResponse putJob(PutJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, - RequestConverters::putMachineLearningJob, + MLRequestConverters::putJob, options, PutJobResponse::fromXContent, Collections.emptySet()); @@ -75,7 +75,7 @@ public PutJobResponse putJob(PutJobRequest request, RequestOptions 
options) thro */ public void putJobAsync(PutJobRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, - RequestConverters::putMachineLearningJob, + MLRequestConverters::putJob, options, PutJobResponse::fromXContent, listener, @@ -95,7 +95,7 @@ public void putJobAsync(PutJobRequest request, RequestOptions options, ActionLis */ public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, - RequestConverters::deleteMachineLearningJob, + MLRequestConverters::deleteJob, options, DeleteJobResponse::fromXContent, Collections.emptySet()); @@ -113,7 +113,7 @@ public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions opti */ public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, - RequestConverters::deleteMachineLearningJob, + MLRequestConverters::deleteJob, options, DeleteJobResponse::fromXContent, listener, @@ -138,7 +138,7 @@ public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, Act */ public OpenJobResponse openJob(OpenJobRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, - RequestConverters::machineLearningOpenJob, + MLRequestConverters::openJob, options, OpenJobResponse::fromXContent, Collections.emptySet()); @@ -160,7 +160,7 @@ public OpenJobResponse openJob(OpenJobRequest request, RequestOptions options) t */ public void openJobAsync(OpenJobRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, - RequestConverters::machineLearningOpenJob, + MLRequestConverters::openJob, options, OpenJobResponse::fromXContent, listener, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index c40b4893e0146..0e5fce5b2272c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -112,9 +112,6 @@ import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; -import org.elasticsearch.protocol.xpack.ml.PutJobRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.rest.action.search.RestSearchAction; @@ -1200,46 +1197,6 @@ static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) { return request; } - static Request putMachineLearningJob(PutJobRequest putJobRequest) throws IOException { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("ml") - .addPathPartAsIs("anomaly_detectors") - .addPathPart(putJobRequest.getJob().getId()) - .build(); - Request request = new Request(HttpPut.METHOD_NAME, endpoint); - request.setEntity(createEntity(putJobRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request 
deleteMachineLearningJob(DeleteJobRequest deleteJobRequest) { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("ml") - .addPathPartAsIs("anomaly_detectors") - .addPathPart(deleteJobRequest.getJobId()) - .build(); - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - - Params params = new Params(request); - params.putParam("force", Boolean.toString(deleteJobRequest.isForce())); - - return request; - } - - static Request machineLearningOpenJob(OpenJobRequest openJobRequest) throws IOException { - String endpoint = new EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("ml") - .addPathPartAsIs("anomaly_detectors") - .addPathPart(openJobRequest.getJobId()) - .addPathPartAsIs("_open") - .build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - request.setJsonEntity(openJobRequest.toString()); - return request; - } - static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { EndpointBuilder endpointBuilder = new EndpointBuilder() .addPathPartAsIs("_xpack/migration/assistance") @@ -1251,7 +1208,7 @@ static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRe return request; } - private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { + static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java new file mode 100644 index 0000000000000..43a41960e003c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; +import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; +import org.elasticsearch.protocol.xpack.ml.PutJobRequest; +import org.elasticsearch.protocol.xpack.ml.job.config.AnalysisConfig; +import org.elasticsearch.protocol.xpack.ml.job.config.Detector; +import org.elasticsearch.protocol.xpack.ml.job.config.Job; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class MLRequestConvertersTests extends ESTestCase { + + public void testPutJob() throws IOException { + Job job = createValidJob("foo"); + PutJobRequest putJobRequest = new PutJobRequest(job); + + Request request = MLRequestConverters.putJob(putJobRequest); + + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/anomaly_detectors/foo")); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + Job parsedJob = Job.PARSER.apply(parser, null).build(); + assertThat(parsedJob, equalTo(job)); + } + } + + public void testOpenJob() throws Exception { + String jobId = "some-job-id"; + OpenJobRequest openJobRequest = new OpenJobRequest(jobId); + openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); + + Request request = MLRequestConverters.openJob(openJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + request.getEntity().writeTo(bos); + assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); + } + + public void testDeleteJob() { + String jobId = randomAlphaOfLength(10); + DeleteJobRequest deleteJobRequest = new DeleteJobRequest(jobId); + + Request request = MLRequestConverters.deleteJob(deleteJobRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId, request.getEndpoint()); + assertEquals(Boolean.toString(false), request.getParameters().get("force")); + + deleteJobRequest.setForce(true); + request = MLRequestConverters.deleteJob(deleteJobRequest); + assertEquals(Boolean.toString(true), request.getParameters().get("force")); + } + + private static Job createValidJob(String jobId) { + AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( + Detector.builder().setFunction("count").build())); + Job.Builder jobBuilder = Job.builder(jobId); + jobBuilder.setAnalysisConfig(analysisConfig); + return jobBuilder.build(); + } +} \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 786cb94f8926d..47195f0bb2aba 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -127,8 +127,6 @@ import org.elasticsearch.index.rankeval.RestRankEvalAction; import 
org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; -import org.elasticsearch.protocol.xpack.ml.OpenJobRequest; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.repositories.fs.FsRepository; @@ -2612,33 +2610,6 @@ public void testXPackDeleteWatch() { assertThat(request.getEntity(), nullValue()); } - public void testDeleteMachineLearningJob() { - String jobId = randomAlphaOfLength(10); - DeleteJobRequest deleteJobRequest = new DeleteJobRequest(jobId); - - Request request = RequestConverters.deleteMachineLearningJob(deleteJobRequest); - assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack/ml/anomaly_detectors/" + jobId, request.getEndpoint()); - assertEquals(Boolean.toString(false), request.getParameters().get("force")); - - deleteJobRequest.setForce(true); - request = RequestConverters.deleteMachineLearningJob(deleteJobRequest); - assertEquals(Boolean.toString(true), request.getParameters().get("force")); - } - - public void testPostMachineLearningOpenJob() throws Exception { - String jobId = "some-job-id"; - OpenJobRequest openJobRequest = new OpenJobRequest(jobId); - openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); - - Request request = RequestConverters.machineLearningOpenJob(openJobRequest); - assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_open", request.getEndpoint()); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - request.getEntity().writeTo(bos); - assertEquals(bos.toString("UTF-8"), "{\"job_id\":\""+ jobId +"\",\"timeout\":\"10m\"}"); - } - /** * Randomize the {@link FetchSourceContext} request parameters. 
*/ diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java index 00fa1bdd47fed..7baaae52a8bfa 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/AnalysisConfig.java @@ -300,6 +300,10 @@ public int hashCode() { multivariateByFields); } + public static Builder builder(List<Detector> detectors) { + return new Builder(detectors); + } + public static class Builder { private List<Detector> detectors; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java index 3274b03877f14..042d48b700688 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java @@ -265,6 +265,10 @@ public int hashCode() { excludeFrequent, rules, detectorIndex); } + public static Builder builder() { + return new Builder(); + } + public static class Builder { private String detectorDescription; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java index 6bc1be3b56384..59840cfec2ae0 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Job.java @@ -412,6 +412,10 @@ public final String toString() { return Strings.toString(this); } + public static Builder builder(String id) { + return new Builder(id); + } + public static class Builder { private String id; @@ -435,7 +439,7 @@ public static class Builder { private String resultsIndexName; private boolean deleted; - public Builder() { + private Builder() { } public Builder(String id) { From 2fa028cfa12067d81ac4bef875b5f8fa84bac5ac Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Fri, 17 Aug 2018 12:36:45 +0200 Subject: [PATCH 28/46] Remove assertion in testDocStats on deletedDocs counter (#32914) The testDocStats test is flaky: it sometimes fails on Jenkins, and the failure is not reproducible locally. The cause is timing. If the number of deleted documents exceeds 33% of the inserted documents, Lucene schedules segments for merging when TieredMergePolicy is used (this does not happen with LogMergePolicy, but ES only uses TieredMergePolicy). If such a merge runs before the stats are retrieved, the "deleted" counter reports 0. The counter can therefore be either 0 or numOfDeletedDocs at this point; an assertion that loose is not useful, so we remove it entirely. 
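For illustration, the race behind the removed assertion looks roughly like this (a simplified sketch reusing the test's names, not the exact test code):

    // numDocsToDelete out of numDocs documents have just been deleted
    final DocStats docStats = indexShard.docStats();
    // A background TieredMergePolicy merge may already have reclaimed the
    // deleted docs, so both 0 and numDocsToDelete are valid values here;
    // asserting on a single expected value is therefore racy:
    assertThat(docStats.getDeleted(), equalTo(numDocsToDelete)); // flaky, removed below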
Closes #32766 --- .../java/org/elasticsearch/index/shard/IndexShardTests.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 47be03b991788..2228e1b017fd4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2391,8 +2391,7 @@ public void testRecoverFromLocalShard() throws IOException { closeShards(sourceShard, targetShard); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32766") - public void testDocStats() throws IOException { + public void testDocStats() throws IOException, InterruptedException { IndexShard indexShard = null; try { indexShard = newStartedShard(); @@ -2441,8 +2440,6 @@ public void testDocStats() throws IOException { assertTrue(searcher.reader().numDocs() <= docStats.getCount()); } assertThat(docStats.getCount(), equalTo(numDocs)); - // Lucene will delete a segment if all docs are deleted from it; this means that we lose the deletes when deleting all docs - assertThat(docStats.getDeleted(), equalTo(numDocsToDelete == numDocs ? 0 : numDocsToDelete)); } // merge them away From ca54aacbb5a8603ec14916c4d65da69086dee8ae Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Fri, 17 Aug 2018 07:03:25 -0400 Subject: [PATCH 29/46] Fix InternalAutoDateHistogram reproducible failure (#32723) Update test logic to correctly bucket intervals. --- .../AutoDateHistogramAggregationBuilder.java | 2 +- .../histogram/InternalAutoDateHistogram.java | 8 +- .../InternalAutoDateHistogramTests.java | 113 ++++++++++++++---- 3 files changed, 95 insertions(+), 28 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 50a0c85c041c8..b97670ddb5730 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -156,7 +156,7 @@ public int getNumBuckets() { return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); } - private static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { + static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); if (timeZone != null) { tzRoundingBuilder.timeZone(timeZone); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 6a78ca6724988..7bc2b9a317838 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -418,7 +418,7 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, Red return currentResult; } int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx, 
- bucketInfo.roundingInfos); + bucketInfo.roundingInfos, targetBuckets); RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx]; Rounding rounding = roundingInfo.rounding; // merge buckets using the new rounding @@ -447,8 +447,8 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, Red return new BucketReduceResult(list, roundingInfo, roundingIdx); } - private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, - RoundingInfo[] roundings) { + static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, + RoundingInfo[] roundings, int targetBuckets) { if (roundingIdx == roundings.length - 1) { return roundingIdx; } @@ -480,7 +480,7 @@ private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, currentKey = currentRounding.nextRoundingValue(currentKey); } currentRoundingIdx++; - } while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval()) + } while (requiredBuckets > (targetBuckets * roundings[currentRoundingIdx - 1].getMaximumInnerInterval()) && currentRoundingIdx < roundings.length); // The loop will increase past the correct rounding index here so we // need to subtract one to get the rounding index we need diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index 981d263d7d633..b7c5bf03ac552 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; @@ -28,7 +29,11 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -39,6 +44,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueHours; import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.createRounding; +import static org.hamcrest.Matchers.equalTo; public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase { @@ -61,6 +68,7 @@ protected InternalAutoDateHistogram createTestInstance(String name, int nbBuckets = randomNumberOfBuckets(); int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1); List buckets = new ArrayList<>(nbBuckets); + long startingDate = System.currentTimeMillis(); long interval = randomIntBetween(1, 3); @@ -72,23 +80,41 @@ protected InternalAutoDateHistogram createTestInstance(String name, } InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList()); BucketInfo 
bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations); + return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); + } + /* + This test was added to reproduce a bug where getAppropriateRounding was only ever using the first innerIntervals + passed in, instead of using the interval associated with the loop. + */ + public void testGetAppropriateRoundingUsesCorrectIntervals() { + RoundingInfo[] roundings = new RoundingInfo[6]; + DateTimeZone timeZone = DateTimeZone.UTC; + // Since we pass 0 as the starting index to getAppropriateRounding, we'll also use + // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval() + // will be larger than the estimate. + roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone), + 1000L, 1000); + roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone), + 60 * 1000L, 1, 5, 10, 30); + roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone), + 60 * 60 * 1000L, 1, 3, 12); - return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); + OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); + // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function + // to increment the rounding (because the bug was that the function would not use the innerIntervals + // from the new rounding. + int result = InternalAutoDateHistogram.getAppropriateRounding(timestamp.toEpochSecond()*1000, + timestamp.plusDays(1).toEpochSecond()*1000, 0, roundings, 25); + assertThat(result, equalTo(2)); } @Override protected void assertReduced(InternalAutoDateHistogram reduced, List inputs) { - int roundingIdx = 0; - for (InternalAutoDateHistogram histogram : inputs) { - if (histogram.getBucketInfo().roundingIdx > roundingIdx) { - roundingIdx = histogram.getBucketInfo().roundingIdx; - } - } - RoundingInfo roundingInfo = roundingInfos[roundingIdx]; long lowest = Long.MAX_VALUE; long highest = 0; + for (InternalAutoDateHistogram histogram : inputs) { for (Histogram.Bucket bucket : histogram.getBuckets()) { long bucketKey = ((DateTime) bucket.getKey()).getMillis(); @@ -100,35 +126,72 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List= 0; j--) { + int interval = roundingInfo.innerIntervals[j]; + if (normalizedDuration / interval < reduced.getBuckets().size()) { + innerIntervalToUse = interval; + innerIntervalIndex = j; + } + } + } + + long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis(); + int bucketCount = getBucketCount(lowest, highest, roundingInfo, intervalInMillis); + + //Next, if our bucketCount is still above what we need, we'll go back and determine the interval + // based on a size calculation. 
+ if (bucketCount > reduced.getBuckets().size()) { + for (int i = innerIntervalIndex; i < roundingInfo.innerIntervals.length; i++) { + long newIntervalMillis = roundingInfo.innerIntervals[i] * roundingInfo.getRoughEstimateDurationMillis(); + if (getBucketCount(lowest, highest, roundingInfo, newIntervalMillis) <= reduced.getBuckets().size()) { + innerIntervalToUse = roundingInfo.innerIntervals[i]; + intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis(); + } } } + Map expectedCounts = new TreeMap<>(); - long intervalInMillis = innerIntervalToUse*roundingInfo.getRoughEstimateDurationMillis(); for (long keyForBucket = roundingInfo.rounding.round(lowest); - keyForBucket <= highest; + keyForBucket <= roundingInfo.rounding.round(highest); keyForBucket = keyForBucket + intervalInMillis) { expectedCounts.put(keyForBucket, 0L); + // Iterate through the input buckets, and for each bucket, determine if it's inside + // the range of the bucket in the outer loop. if it is, add the doc count to the total + // for that bucket. + for (InternalAutoDateHistogram histogram : inputs) { for (Histogram.Bucket bucket : histogram.getBuckets()) { - long bucketKey = ((DateTime) bucket.getKey()).getMillis(); - long roundedBucketKey = roundingInfo.rounding.round(bucketKey); + long roundedBucketKey = roundingInfo.rounding.round(((DateTime) bucket.getKey()).getMillis()); + long docCount = bucket.getDocCount(); if (roundedBucketKey >= keyForBucket && roundedBucketKey < keyForBucket + intervalInMillis) { - long count = bucket.getDocCount(); expectedCounts.compute(keyForBucket, - (key, oldValue) -> (oldValue == null ? 0 : oldValue) + count); + (key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount); } } } } + // If there is only a single bucket, and we haven't added it above, add a bucket with no documents. + // this step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above. + if (roundingInfo.rounding.round(lowest) == roundingInfo.rounding.round(highest) && expectedCounts.isEmpty()) { + expectedCounts.put(roundingInfo.rounding.round(lowest), 0L); + } + + // pick out the actual reduced values to the make the assertion more readable Map actualCounts = new TreeMap<>(); for (Histogram.Bucket bucket : reduced.getBuckets()) { actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), @@ -137,12 +200,16 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List instanceReader() { return InternalAutoDateHistogram::new; From 75014a22d726cc95cdd00d9cb5a59924049d56a6 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Fri, 17 Aug 2018 14:06:24 +0300 Subject: [PATCH 30/46] Enable FIPS140LicenseBootstrapCheck (#32903) This commit ensures that xpack.security.fips_mode.enabled: true cannot be set in a node that doesn't have the appropriate license. 
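Conceptually, the added check behaves like the sketch below (illustrative only; the real FIPS140LicenseBootstrapCheck lives in x-pack and consults the actual license state, so the helper names used here are hypothetical):

    // Hypothetical sketch of the bootstrap check's logic:
    if (fipsModeEnabled && isFipsCompatibleLicense(licenseOperationMode) == false) {
        return BootstrapCheckResult.failure("FIPS mode is only allowed with a compatible license");
    }
    return BootstrapCheckResult.success();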
--- .../main/java/org/elasticsearch/xpack/security/Security.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 857b343b753c4..02910b5dd74fc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -301,7 +301,8 @@ public Security(Settings settings, final Path configPath) { new TLSLicenseBootstrapCheck(), new FIPS140SecureSettingsBootstrapCheck(settings, env), new FIPS140JKSKeystoreBootstrapCheck(settings), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings))); + new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings), + new FIPS140LicenseBootstrapCheck(XPackSettings.FIPS_MODE_ENABLED.get(settings)))); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateMaxDeterminizedStates(settings); From 0d92f377fd3ecc9c6336f4770fc92951368a2aec Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 17 Aug 2018 14:09:01 +0200 Subject: [PATCH 31/46] Tests: Fix timezone conversion in DateTimeUnitTests This fix prevents attempts to parse unknown time zone ids by converting the Joda time zone to a java.time-based ZoneId via java.util.TimeZone. Closes #32927 --- .../org/elasticsearch/common/rounding/DateTimeUnitTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java index 33ea83c759297..f188eb4cac6f4 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java @@ -69,7 +69,7 @@ public void testEnumIds() { public void testConversion() { long millis = randomLongBetween(0, Instant.now().toEpochMilli()); DateTimeZone zone = randomDateTimeZone(); - ZoneId zoneId = ZoneId.of(zone.getID()); + ZoneId zoneId = zone.toTimeZone().toZoneId(); int offsetSeconds = zoneId.getRules().getOffset(Instant.ofEpochMilli(millis)).getTotalSeconds(); long parsedMillisJavaTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) From a08127c072915b8598bbbf4ed7302aa737cddd6c Mon Sep 17 00:00:00 2001 From: Jonathan Little Date: Fri, 17 Aug 2018 05:11:18 -0700 Subject: [PATCH 32/46] Scripted metric aggregations: add deprecation warning and system property to control legacy params (#31597) * Scripted metric aggregations: add deprecation warning and system property to control legacy params The scripted metric aggregation params._agg/_aggs are replaced by the state/states context variables. By default the old params are still present, and a deprecation warning is emitted whenever scripted metric aggregations are used. A new system property can be used to disable the legacy params; this functionality will be removed in a future revision. 
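For example, a map script that previously accumulated into params._agg now accumulates into state instead, as the docs changes below show (simplified Painless snippets taken from the stored-script setup):

    // deprecated form:
    params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)
    // replacement form:
    state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)

Until the legacy params are removed, they can be disabled explicitly with -Des.aggregations.enable_scripted_metric_agg_param=false.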
* Fix minor style issue and docs test failure * Disable deprecated params._agg/_aggs in tests and revise tests to use state/states instead * Add integration test covering deprecated scripted metrics aggs params._agg/_aggs access * Disable deprecated params._agg/_aggs in docs integration tests and revise stored scripts to use state/states instead * Revert unnecessary migrations doc change A relevant note should be added in the changes destined for 7.0; this PR is going to be backported to 6.x. * Replace deprecated _agg param bwc integration test with a couple of unit tests * Fix compatibility test after merge * Rename backwards compatibility system property per code review feedback * Tweak deprecation warning text per review feedback --- .../elasticsearch/gradle/BuildPlugin.groovy | 2 + docs/build.gradle | 11 +- server/build.gradle | 12 + .../script/ScriptedMetricAggContexts.java | 21 ++ .../scripted/InternalScriptedMetric.java | 4 +- .../ScriptedMetricAggregatorFactory.java | 13 +- .../metrics/ScriptedMetricIT.java | 338 +++++++----------- ...alScriptedMetricAggStateV6CompatTests.java | 109 ++++++ .../scripted/InternalScriptedMetricTests.java | 4 +- ...MetricAggregatorAggStateV6CompatTests.java | 180 ++++++++++ .../ScriptedMetricAggregatorTests.java | 74 ++-- 11 files changed, 512 insertions(+), 256 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index f3f014f0e8aa4..bf3ffcabe2f68 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -798,6 +798,8 @@ class BuildPlugin implements Plugin { systemProperty 'tests.task', path systemProperty 'tests.security.manager', 'true' systemProperty 'jna.nosys', 'true' + // TODO: remove this deprecation compatibility setting for 7.0 + systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false' systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion() if (project.ext.inFipsJvm) { systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" diff --git a/docs/build.gradle b/docs/build.gradle index 029147bba2ff0..8ee5c8a8e5399 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -41,6 +41,9 @@ integTestCluster { // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults systemProperty 'es.scripting.use_java_time', 'false' systemProperty 'es.scripting.update.ctx_in_params', 'false' + + // TODO: remove this deprecation compatibility setting for 7.0 + systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false' } // remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed @@ -400,25 +403,25 @@ buildRestTests.setups['stored_scripted_metric_script'] = ''' - do: put_script: id: "my_init_script" - body: { "script": { "lang": "painless", "source": "params._agg.transactions = []" } } + body: { "script": { "lang": "painless", "source": "state.transactions = []" } } - match: { acknowledged: true } - do: put_script: id: "my_map_script" - body: { "script": { "lang": "painless", "source": 
"params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } } + body: { "script": { "lang": "painless", "source": "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } } - match: { acknowledged: true } - do: put_script: id: "my_combine_script" - body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in params._agg.transactions) { profit += t; } return profit" } } + body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in state.transactions) { profit += t; } return profit" } } - match: { acknowledged: true } - do: put_script: id: "my_reduce_script" - body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in params._aggs) { profit += a; } return profit" } } + body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in states) { profit += a; } return profit" } } - match: { acknowledged: true } ''' diff --git a/server/build.gradle b/server/build.gradle index 1964eddd03e95..b22a93a702c2b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -342,3 +342,15 @@ if (isEclipse == false || project.path == ":server-tests") { integTest.mustRunAfter test } +// TODO: remove these compatibility tests in 7.0 +additionalTest('testScriptedMetricAggParamsV6Compatibility') { + include '**/ScriptedMetricAggregatorAggStateV6CompatTests.class' + include '**/InternalScriptedMetricAggStateV6CompatTests.class' + systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'true' +} + +test { + // these are tested explicitly in separate test tasks + exclude '**/ScriptedMetricAggregatorAggStateV6CompatTests.class' + exclude '**/InternalScriptedMetricAggStateV6CompatTests.class' +} diff --git a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java index 774dc95d39977..0c34c59b7be58 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java @@ -22,6 +22,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -31,6 +33,25 @@ import java.util.Map; public class ScriptedMetricAggContexts { + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(Loggers.getLogger(ScriptedMetricAggContexts.class)); + + // Public for access from tests + public static final String AGG_PARAM_DEPRECATION_WARNING = + "params._agg/_aggs for scripted metric aggregations are deprecated, use state/states (not in params) instead. 
" + + "Use -Des.aggregations.enable_scripted_metric_agg_param=false to disable."; + + public static boolean deprecatedAggParamEnabled() { + boolean enabled = Boolean.parseBoolean( + System.getProperty("es.aggregations.enable_scripted_metric_agg_param", "true")); + + if (enabled) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("enable_scripted_metric_agg_param", AGG_PARAM_DEPRECATION_WARNING); + } + + return enabled; + } + private abstract static class ParamsAndStateBase { private final Map params; private final Object state; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index f4281c063ff2c..4124a8eeb76ad 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -96,7 +96,9 @@ public InternalAggregation doReduce(List aggregations, Redu } // Add _aggs to params map for backwards compatibility (redundant with a context variable on the ReduceScript created below). - params.put("_aggs", aggregationObjects); + if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) { + params.put("_aggs", aggregationObjects); + } ScriptedMetricAggContexts.ReduceScript.Factory factory = reduceContext.scriptService().compile( firstAggregation.reduceScript, ScriptedMetricAggContexts.ReduceScript.CONTEXT); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index 9bd904a07013d..076c29feceae6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -83,10 +83,17 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu // Add _agg to params map for backwards compatibility (redundant with context variables on the scripts created below). // When this is removed, aggState (as passed to ScriptedMetricAggregator) can be changed to Map, since // it won't be possible to completely replace it with another type as is possible when it's an entry in params. - if (aggParams.containsKey("_agg") == false) { - aggParams.put("_agg", new HashMap()); + Object aggState = new HashMap(); + if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) { + if (aggParams.containsKey("_agg") == false) { + // Add _agg if it wasn't added manually + aggParams.put("_agg", aggState); + } else { + // If it was added manually, also use it for the agg context variable to reduce the likelihood of + // weird behavior due to multiple different variables. 
+ aggState = aggParams.get("_agg"); + } } - Object aggState = aggParams.get("_agg"); final ScriptedMetricAggContexts.InitScript initScript = this.initScript.newInstance( mergeParams(aggParams, initScriptParams), aggState); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 13e1489795996..c000b7fb22891 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -67,6 +67,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; @@ -90,42 +91,57 @@ public static class CustomScriptPlugin extends MockScriptPlugin { protected Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); - scripts.put("_agg['count'] = 1", vars -> - aggScript(vars, agg -> ((Map) agg).put("count", 1))); + scripts.put("state['count'] = 1", vars -> + aggScript(vars, state -> state.put("count", 1))); - scripts.put("_agg.add(1)", vars -> - aggScript(vars, agg -> ((List) agg).add(1))); + scripts.put("state.list.add(1)", vars -> + aggScript(vars, state -> { + // Lazily populate state.list for tests without an init script + if (state.containsKey("list") == false) { + state.put("list", new ArrayList()); + } + + ((List) state.get("list")).add(1); + })); - scripts.put("_agg[param1] = param2", vars -> - aggScript(vars, agg -> ((Map) agg).put(XContentMapValues.extractValue("params.param1", vars), + scripts.put("state[param1] = param2", vars -> + aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars), XContentMapValues.extractValue("params.param2", vars)))); scripts.put("vars.multiplier = 3", vars -> ((Map) vars.get("vars")).put("multiplier", 3)); - scripts.put("_agg.add(vars.multiplier)", vars -> - aggScript(vars, agg -> ((List) agg).add(XContentMapValues.extractValue("vars.multiplier", vars)))); + scripts.put("state.list.add(vars.multiplier)", vars -> + aggScript(vars, state -> { + // Lazily populate state.list for tests without an init script + if (state.containsKey("list") == false) { + state.put("list", new ArrayList()); + } + + ((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars)); + })); // Equivalent to: // // newaggregation = []; // sum = 0; // - // for (a in _agg) { - // sum += a + // for (s in state.list) { + // sum += s // }; // // newaggregation.add(sum); // return newaggregation" // - scripts.put("sum agg values as a new aggregation", vars -> { + scripts.put("sum state values as a new aggregation", vars -> { List newAggregation = new ArrayList(); - List agg = (List) vars.get("_agg"); + Map state = (Map) vars.get("state"); + List list = (List) state.get("list"); - if (agg != null) { + if (list != null) { Integer sum = 0; - for (Object a : (List) agg) { - sum += ((Number) a).intValue(); + for (Object s : list) { + sum += ((Number) s).intValue(); } newAggregation.add(sum); } @@ -137,24 +153,41 @@ protected Map, Object>> pluginScripts() { // newaggregation = []; // sum = 0; // - // for (aggregation in _aggs) { - // for (a in aggregation) { - // sum += a + 
// for (state in states) { + // for (s in state) { + // sum += s // } // }; // // newaggregation.add(sum); // return newaggregation" // - scripts.put("sum aggs of agg values as a new aggregation", vars -> { + scripts.put("sum all states (lists) values as a new aggregation", vars -> { List newAggregation = new ArrayList(); Integer sum = 0; - List aggs = (List) vars.get("_aggs"); - for (Object aggregation : (List) aggs) { - if (aggregation != null) { - for (Object a : (List) aggregation) { - sum += ((Number) a).intValue(); + List> states = (List>) vars.get("states"); + for (List list : states) { + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); + } + } + } + newAggregation.add(sum); + return newAggregation; + }); + + scripts.put("sum all states' state.list values as a new aggregation", vars -> { + List newAggregation = new ArrayList(); + Integer sum = 0; + + List> states = (List>) vars.get("states"); + for (Map state : states) { + List list = (List) state.get("list"); + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); } } } @@ -167,25 +200,25 @@ protected Map, Object>> pluginScripts() { // newaggregation = []; // sum = 0; // - // for (aggregation in _aggs) { - // for (a in aggregation) { - // sum += a + // for (state in states) { + // for (s in state) { + // sum += s // } // }; // // newaggregation.add(sum * multiplier); // return newaggregation" // - scripts.put("multiplied sum aggs of agg values as a new aggregation", vars -> { + scripts.put("multiplied sum all states (lists) values as a new aggregation", vars -> { Integer multiplier = (Integer) vars.get("multiplier"); List newAggregation = new ArrayList(); Integer sum = 0; - List aggs = (List) vars.get("_aggs"); - for (Object aggregation : (List) aggs) { - if (aggregation != null) { - for (Object a : (List) aggregation) { - sum += ((Number) a).intValue(); + List> states = (List>) vars.get("states"); + for (List list : states) { + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); } } } @@ -193,53 +226,12 @@ protected Map, Object>> pluginScripts() { return newAggregation; }); - scripts.put("state.items = new ArrayList()", vars -> - aggContextScript(vars, state -> ((HashMap) state).put("items", new ArrayList()))); - - scripts.put("state.items.add(1)", vars -> - aggContextScript(vars, state -> { - HashMap stateMap = (HashMap) state; - List items = (List) stateMap.get("items"); - items.add(1); - })); - - scripts.put("sum context state values", vars -> { - int sum = 0; - HashMap state = (HashMap) vars.get("state"); - List items = (List) state.get("items"); - - for (Object x : items) { - sum += (Integer)x; - } - - return sum; - }); - - scripts.put("sum context states", vars -> { - Integer sum = 0; - - List states = (List) vars.get("states"); - for (Object state : states) { - sum += ((Number) state).intValue(); - } - - return sum; - }); - return scripts; } - static Object aggScript(Map vars, Consumer fn) { - return aggScript(vars, fn, "_agg"); - } - - static Object aggContextScript(Map vars, Consumer fn) { - return aggScript(vars, fn, "state"); - } - @SuppressWarnings("unchecked") - private static Object aggScript(Map vars, Consumer fn, String stateVarName) { - T aggState = (T) vars.get(stateVarName); + static Map aggScript(Map vars, Consumer> fn) { + Map aggState = (Map) vars.get("state"); fn.accept(aggState); return aggState; } @@ -285,17 +277,17 @@ public void setupSuiteScopeCluster() throws Exception { 
assertAcked(client().admin().cluster().preparePutStoredScript() .setId("mapScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"_agg.add(vars.multiplier)\"} }"), XContentType.JSON)); + " \"source\": \"state.list.add(vars.multiplier)\"} }"), XContentType.JSON)); assertAcked(client().admin().cluster().preparePutStoredScript() .setId("combineScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum agg values as a new aggregation\"} }"), XContentType.JSON)); + " \"source\": \"sum state values as a new aggregation\"} }"), XContentType.JSON)); assertAcked(client().admin().cluster().preparePutStoredScript() .setId("reduceScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum aggs of agg values as a new aggregation\"} }"), XContentType.JSON)); + " \"source\": \"sum all states (lists) values as a new aggregation\"} }"), XContentType.JSON)); indexRandom(true, builders); ensureSearchable(); @@ -315,9 +307,10 @@ public void setUp() throws Exception { // the name of the file script is used in test method while the source of the file script // must match a predefined script from CustomScriptPlugin.pluginScripts() method Files.write(scripts.resolve("init_script.mockscript"), "vars.multiplier = 3".getBytes("UTF-8")); - Files.write(scripts.resolve("map_script.mockscript"), "_agg.add(vars.multiplier)".getBytes("UTF-8")); - Files.write(scripts.resolve("combine_script.mockscript"), "sum agg values as a new aggregation".getBytes("UTF-8")); - Files.write(scripts.resolve("reduce_script.mockscript"), "sum aggs of agg values as a new aggregation".getBytes("UTF-8")); + Files.write(scripts.resolve("map_script.mockscript"), "state.list.add(vars.multiplier)".getBytes("UTF-8")); + Files.write(scripts.resolve("combine_script.mockscript"), "sum state values as a new aggregation".getBytes("UTF-8")); + Files.write(scripts.resolve("reduce_script.mockscript"), + "sum all states (lists) values as a new aggregation".getBytes("UTF-8")); } catch (IOException e) { throw new RuntimeException("failed to create scripts"); } @@ -329,7 +322,7 @@ protected Path nodeConfigPath(int nodeOrdinal) { } public void testMap() { - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -365,52 +358,12 @@ public void testMap() { assertThat(numShardsRun, greaterThan(0)); } - public void testExplicitAggParam() { - Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); - - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); - - SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) - .get(); - assertSearchResponse(response); - assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - 
assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - assertThat(numberValue, equalTo((Number) 1)); - totalCount += numberValue.longValue(); - } - } - assertThat(totalCount, equalTo(numDocs)); - } - - public void testMapWithParamsAndImplicitAggMap() { + public void testMapWithParams() { // Split the params up between the script and the aggregation. - // Don't put any _agg map in params. Map scriptParams = Collections.singletonMap("param1", "12"); Map aggregationParams = Collections.singletonMap("param2", 1); - // The _agg hashmap will be available even if not declared in the params map - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state[param1] = param2", scriptParams); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -454,7 +407,6 @@ public void testInitMapWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); SearchResponse response = client() @@ -466,7 +418,7 @@ public void testInitMapWithParams() { .initScript( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) .mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "_agg.add(vars.multiplier)", Collections.emptyMap()))) + "state.list.add(vars.multiplier)", Collections.emptyMap()))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -483,8 +435,11 @@ public void testInitMapWithParams() { long totalCount = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; + assertThat(object, instanceOf(HashMap.class)); + Map map = (Map) object; + assertThat(map, hasKey("list")); + assertThat(map.get("list"), instanceOf(List.class)); + List list = (List) map.get("list"); for (Object o : list) { assertThat(o, notNullValue()); assertThat(o, instanceOf(Number.class)); @@ -501,12 +456,11 @@ public void testMapCombineWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -553,13 +507,13 @@ public 
void testInitMapCombineWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -607,15 +561,15 @@ public void testInitMapCombineReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -652,15 +606,15 @@ public void testInitMapCombineReduceGetProperty() throws Exception { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client() .prepareSearch("idx") @@ -707,14 +661,14 @@ public void testMapCombineReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); 
params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -749,13 +703,13 @@ public void testInitMapReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -789,12 +743,12 @@ public void testMapReduceWithParams() { Map varsMap = new HashMap<>(); varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -828,18 +782,18 @@ public void testInitMapCombineReduceWithParamsAndReduceParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Map reduceParams = new HashMap<>(); reduceParams.put("multiplier", 4); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new 
aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "multiplied sum aggs of agg values as a new aggregation", reduceParams); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "multiplied sum all states (lists) values as a new aggregation", reduceParams); SearchResponse response = client() .prepareSearch("idx") @@ -875,7 +829,6 @@ public void testInitMapCombineReduceWithParamsStored() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); SearchResponse response = client() @@ -916,15 +869,15 @@ public void testInitMapCombineReduceWithParamsAsSubAgg() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -977,15 +930,15 @@ public void testEmptyAggregation() throws Exception { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) @@ -1021,7 +974,7 @@ public void testEmptyAggregation() throws Exception { * not using a script does get cached. 
*/ public void testDontCacheScripts() throws Exception { - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get()); @@ -1047,7 +1000,7 @@ public void testDontCacheScripts() throws Exception { public void testConflictingAggAndScriptParams() { Map params = Collections.singletonMap("param1", "12"); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", params); SearchRequestBuilder builder = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -1056,37 +1009,4 @@ public void testConflictingAggAndScriptParams() { SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); } - - public void testAggFromContext() { - Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items = new ArrayList()", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items.add(1)", Collections.emptyMap()); - Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context state values", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context states", - Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(Integer.class)); - Integer aggResult = (Integer) scriptedMetricAggregation.aggregation(); - long totalAgg = aggResult.longValue(); - assertThat(totalAgg, equalTo(numDocs)); - } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java new file mode 100644 index 0000000000000..4abf68a960b11 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics.scripted; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptedMetricAggContexts; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.Aggregation.CommonFields; +import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.test.InternalAggregationTestCase; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; + +/** + * This test verifies that the _aggs param is added correctly when the system property + * "es.aggregations.enable_scripted_metric_agg_param" is set to true. + */ +public class InternalScriptedMetricAggStateV6CompatTests extends InternalAggregationTestCase { + + private static final String REDUCE_SCRIPT_NAME = "reduceScript"; + + @Override + protected InternalScriptedMetric createTestInstance(String name, List pipelineAggregators, + Map metaData) { + Script reduceScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, REDUCE_SCRIPT_NAME, Collections.emptyMap()); + return new InternalScriptedMetric(name, "agg value", reduceScript, pipelineAggregators, metaData); + } + + /** + * Mock of the script service. The script that is run looks at the + * "_aggs" parameter to verify that it was put in place by InternalScriptedMetric. 
+ */ + @Override + protected ScriptService mockScriptService() { + Function, Object> script = params -> { + Object aggs = params.get("_aggs"); + Object states = params.get("states"); + assertThat(aggs, instanceOf(List.class)); + assertThat(aggs, sameInstance(states)); + return aggs; + }; + + @SuppressWarnings("unchecked") + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, + Collections.singletonMap(REDUCE_SCRIPT_NAME, script)); + Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + } + + @Override + protected void assertReduced(InternalScriptedMetric reduced, List inputs) { + assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); + } + + @Override + protected Reader instanceReader() { + return InternalScriptedMetric::new; + } + + @Override + protected void assertFromXContent(InternalScriptedMetric aggregation, ParsedAggregation parsedAggregation) {} + + @Override + protected Predicate excludePathsFromXContentInsertion() { + return path -> path.contains(CommonFields.VALUE.getPreferredName()); + } + + @Override + protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) { + String name = instance.getName(); + Object value = instance.aggregation(); + Script reduceScript = instance.reduceScript; + List pipelineAggregators = instance.pipelineAggregators(); + Map metaData = instance.getMetaData(); + return new InternalScriptedMetric(name + randomAlphaOfLength(5), value, reduceScript, pipelineAggregators, + metaData); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java index 584208af4177c..70ddacf5698b2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java @@ -107,7 +107,7 @@ private static Object randomValue(Supplier[] valueTypes, int level) { /** * Mock of the script service. The script that is run looks at the - * "_aggs" parameter visible when executing the script and simply returns the count. + * "states" context variable visible when executing the script and simply returns the count. * This should be equal to the number of input InternalScriptedMetrics that are reduced * in total. 
*/ @@ -116,7 +116,7 @@ protected ScriptService mockScriptService() { // mock script always returns the size of the input aggs list as result @SuppressWarnings("unchecked") MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, - Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List) script.get("_aggs")).size())); + Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List) script.get("states")).size())); Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java new file mode 100644 index 0000000000000..bf78cae711b9d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics.scripted; + +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptedMetricAggContexts; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; + +/** + * This test verifies that the _agg param is added correctly when the system property + * "es.aggregations.enable_scripted_metric_agg_param" is set to true.
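The gate itself lives in ScriptedMetricAggContexts, which this patch does not show; a plausible sketch of reading such a flag, where only the property name is taken from the javadoc above and every other name is an illustrative assumption:

[source,java]
----
final class AggParamGateSketch {
    // Hypothetical sketch of the system-property gate these compat tests rely
    // on; the class, method, and default value are assumptions.
    static final String AGG_PARAM_PROPERTY = "es.aggregations.enable_scripted_metric_agg_param";

    static boolean aggParamEnabled() {
        return Boolean.parseBoolean(System.getProperty(AGG_PARAM_PROPERTY, "false"));
    }
}
----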
+ */ +public class ScriptedMetricAggregatorAggStateV6CompatTests extends AggregatorTestCase { + + private static final String AGG_NAME = "scriptedMetric"; + private static final Script INIT_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScript", Collections.emptyMap()); + private static final Script MAP_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScript", Collections.emptyMap()); + private static final Script COMBINE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScript", + Collections.emptyMap()); + + private static final Script INIT_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, + "initScriptExplicitAgg", Collections.emptyMap()); + private static final Script MAP_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, + "mapScriptExplicitAgg", Collections.emptyMap()); + private static final Script COMBINE_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, + "combineScriptExplicitAgg", Collections.emptyMap()); + private static final String EXPLICIT_AGG_OBJECT = "Explicit agg object"; + + private static final Map, Object>> SCRIPTS = new HashMap<>(); + + @BeforeClass + @SuppressWarnings("unchecked") + public static void initMockScripts() { + // If _agg is provided implicitly, it should be the same objects as "state" from the context. + SCRIPTS.put("initScript", params -> { + Object agg = params.get("_agg"); + Object state = params.get("state"); + assertThat(agg, instanceOf(Map.class)); + assertThat(agg, sameInstance(state)); + return agg; + }); + SCRIPTS.put("mapScript", params -> { + Object agg = params.get("_agg"); + Object state = params.get("state"); + assertThat(agg, instanceOf(Map.class)); + assertThat(agg, sameInstance(state)); + return agg; + }); + SCRIPTS.put("combineScript", params -> { + Object agg = params.get("_agg"); + Object state = params.get("state"); + assertThat(agg, instanceOf(Map.class)); + assertThat(agg, sameInstance(state)); + return agg; + }); + + SCRIPTS.put("initScriptExplicitAgg", params -> { + Object agg = params.get("_agg"); + assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); + return agg; + }); + SCRIPTS.put("mapScriptExplicitAgg", params -> { + Object agg = params.get("_agg"); + assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); + return agg; + }); + SCRIPTS.put("combineScriptExplicitAgg", params -> { + Object agg = params.get("_agg"); + assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); + return agg; + }); + } + + /** + * Test that the _agg param is implicitly added + */ + public void testWithImplicitAggParam() throws IOException { + try (Directory directory = newDirectory()) { + Integer numDocs = 10; + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT); + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + } + } + + assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); + } + + /** + * Test that an explicitly added _agg param is honored + */ + public void testWithExplicitAggParam() throws IOException { + try (Directory directory = newDirectory()) { + 
Integer numDocs = 10; + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + Map aggParams = new HashMap<>(); + aggParams.put("_agg", EXPLICIT_AGG_OBJECT); + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder + .params(aggParams) + .initScript(INIT_SCRIPT_EXPLICIT_AGG) + .mapScript(MAP_SCRIPT_EXPLICIT_AGG) + .combineScript(COMBINE_SCRIPT_EXPLICIT_AGG); + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + } + } + + assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); + } + + /** + * We cannot use Mockito for mocking QueryShardContext in this case because + * script-related methods (e.g. QueryShardContext#getLazyExecutableScript) + * are final and cannot be mocked + */ + @Override + protected QueryShardContext queryShardContextMock(MapperService mapperService) { + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS); + Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + return new QueryShardContext(0, mapperService.getIndexSettings(), null, null, mapperService, null, scriptService, + xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index 5124503fc0369..65e42556461a5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -83,72 +83,72 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { @SuppressWarnings("unchecked") public static void initMockScripts() { SCRIPTS.put("initScript", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - return agg; - }); + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + return state; + }); SCRIPTS.put("mapScript", params -> { - Map agg = (Map) params.get("_agg"); - ((List) agg.get("collector")).add(1); // just add 1 for each doc the script is run on - return agg; + Map state = (Map) params.get("state"); + ((List) state.get("collector")).add(1); // just add 1 for each doc the script is run on + return state; }); SCRIPTS.put("combineScript", params -> { - Map agg = (Map) params.get("_agg"); - return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).sum(); + Map state = (Map) params.get("state"); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).sum(); }); SCRIPTS.put("initScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - return agg; - }); + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + return state; + }); SCRIPTS.put("mapScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - ((List) agg.get("collector")).add(((Number)
params.get("_score")).doubleValue()); - return agg; + Map state = (Map) params.get("state"); + ((List) state.get("collector")).add(((Number) params.get("_score")).doubleValue()); + return state; }); SCRIPTS.put("combineScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - return ((List) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); + Map state = (Map) params.get("state"); + return ((List) state.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); }); SCRIPTS.put("initScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); Integer initialValue = (Integer)params.get("initialValue"); ArrayList collector = new ArrayList<>(); collector.add(initialValue); - agg.put("collector", collector); - return agg; + state.put("collector", collector); + return state; }); SCRIPTS.put("mapScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); Integer itemValue = (Integer) params.get("itemValue"); - ((List) agg.get("collector")).add(itemValue); - return agg; + ((List) state.get("collector")).add(itemValue); + return state; }); SCRIPTS.put("combineScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); int divisor = ((Integer) params.get("divisor")); - return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); }); SCRIPTS.put("initScriptSelfRef", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - agg.put("selfRef", agg); - return agg; + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + state.put("selfRef", state); + return state; }); SCRIPTS.put("mapScriptSelfRef", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("selfRef", agg); - return agg; + Map state = (Map) params.get("state"); + state.put("selfRef", state); + return state; }); SCRIPTS.put("combineScriptSelfRef", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("selfRef", agg); - return agg; + Map state = (Map) params.get("state"); + state.put("selfRef", state); + return state; }); } @@ -170,7 +170,7 @@ public void testNoDocs() throws IOException { } /** - * without combine script, the "_aggs" map should contain a list of the size of the number of documents matched + * without combine script, the "states" map should contain a list of the size of the number of documents matched */ public void testScriptedMetricWithoutCombine() throws IOException { try (Directory directory = newDirectory()) { From 9cec4aa14b7ea2a4283348df9d5a45cf1a5ab137 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 17 Aug 2018 07:21:17 -0500 Subject: [PATCH 33/46] [ML] fix updating opened jobs scheduled events (#31651) (#32881) * ML: fix updating opened jobs scheduled events (#31651) * Adding UpdateParamsTests license header * Adding integration test and addressing PR comments * addressing test and job names --- .../xpack/core/ml/job/config/JobUpdate.java | 2 +- .../core/ml/job/config/JobUpdateTests.java | 2 + .../job/process/autodetect/UpdateParams.java | 1 + .../process/autodetect/UpdateParamsTests.java | 45 ++++++++++ .../ml/integration/ScheduledEventsIT.java | 83 ++++++++++++++++++- 5 files changed, 129 insertions(+), 4 deletions(-) create mode 100644 
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 380f540a31782..cdfd9bad7f1de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -258,7 +258,7 @@ public Version getJobVersion() { } public boolean isAutodetectProcessUpdate() { - return modelPlotConfig != null || detectorUpdates != null; + return modelPlotConfig != null || detectorUpdates != null || groups != null; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index c1f25bead224e..9aedf61859d32 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -274,6 +274,8 @@ public void testIsAutodetectProcessUpdate() { assertTrue(update.isAutodetectProcessUpdate()); update = new JobUpdate.Builder("foo").setDetectorUpdates(Collections.singletonList(mock(JobUpdate.DetectorUpdate.class))).build(); assertTrue(update.isAutodetectProcessUpdate()); + update = new JobUpdate.Builder("foo").setGroups(Arrays.asList("bar")).build(); + assertTrue(update.isAutodetectProcessUpdate()); } public void testUpdateAnalysisLimitWithValueGreaterThanMax() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java index 127fb18e5fff4..2d338890f9fa4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParams.java @@ -66,6 +66,7 @@ public static UpdateParams fromJobUpdate(JobUpdate jobUpdate) { return new Builder(jobUpdate.getJobId()) .modelPlotConfig(jobUpdate.getModelPlotConfig()) .detectorUpdates(jobUpdate.getDetectorUpdates()) + .updateScheduledEvents(jobUpdate.getGroups() != null) .build(); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java new file mode 100644 index 0000000000000..2683c1131f5bf --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/UpdateParamsTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.job.process.autodetect; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + + +public class UpdateParamsTests extends ESTestCase { + + public void testFromJobUpdate() { + String jobId = "foo"; + DetectionRule rule = new DetectionRule.Builder(Arrays.asList( + new RuleCondition(RuleCondition.AppliesTo.ACTUAL, + Operator.GT, 1.0))).build(); + List rules = Arrays.asList(rule); + List detectorUpdates = Collections.singletonList( + new JobUpdate.DetectorUpdate(2, null, rules)); + JobUpdate.Builder updateBuilder = new JobUpdate.Builder(jobId) + .setModelPlotConfig(new ModelPlotConfig()) + .setDetectorUpdates(detectorUpdates); + + UpdateParams params = UpdateParams.fromJobUpdate(updateBuilder.build()); + + assertFalse(params.isUpdateScheduledEvents()); + assertEquals(params.getDetectorUpdates(), updateBuilder.build().getDetectorUpdates()); + assertEquals(params.getModelPlotConfig(), updateBuilder.build().getModelPlotConfig()); + + params = UpdateParams.fromJobUpdate(updateBuilder.setGroups(Arrays.asList("bar")).build()); + + assertTrue(params.isUpdateScheduledEvents()); + } + +} diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java index 6703e4ef2365b..fb261908e2c10 100644 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java +++ b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java @@ -12,11 +12,13 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.junit.After; @@ -193,9 +195,9 @@ public void testScheduledEventWithInterimResults() throws IOException { /** * Test an open job picks up changes to scheduled events/calendars */ - public void testOnlineUpdate() throws Exception { + public void testAddEventsToOpenJob() throws Exception { TimeValue bucketSpan = TimeValue.timeValueMinutes(30); - Job.Builder job = createJob("scheduled-events-online-update", bucketSpan); + Job.Builder job = createJob("scheduled-events-add-events-to-open-job", bucketSpan); long startTime = 1514764800000L; final int bucketCount = 5; @@ -209,7 +211,7 @@ public void testOnlineUpdate() throws Exception { // Now create a calendar and events for the job while it is open String calendarId = 
"test-calendar-online-update"; - putCalendar(calendarId, Collections.singletonList(job.getId()), "testOnlineUpdate calendar"); + putCalendar(calendarId, Collections.singletonList(job.getId()), "testAddEventsToOpenJob calendar"); List events = new ArrayList<>(); long eventStartTime = startTime + (bucketCount + 1) * bucketSpan.millis(); @@ -257,6 +259,81 @@ public void testOnlineUpdate() throws Exception { assertEquals(0, buckets.get(8).getScheduledEvents().size()); } + /** + * An open job that later gets added to a calendar, should take the scheduled events into account + */ + public void testAddOpenedJobToGroupWithCalendar() throws Exception { + TimeValue bucketSpan = TimeValue.timeValueMinutes(30); + String groupName = "opened-calendar-job-group"; + Job.Builder job = createJob("scheduled-events-add-opened-job-to-group-with-calendar", bucketSpan); + + long startTime = 1514764800000L; + final int bucketCount = 5; + + // Open the job + openJob(job.getId()); + + // write some buckets of data + postData(job.getId(), generateData(startTime, bucketSpan, bucketCount, bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + + String calendarId = "test-calendar-open-job-update"; + + // Create a new calendar referencing groupName + putCalendar(calendarId, Collections.singletonList(groupName), "testAddOpenedJobToGroupWithCalendar calendar"); + + // Put events in the calendar + List events = new ArrayList<>(); + long eventStartTime = startTime + (bucketCount + 1) * bucketSpan.millis(); + long eventEndTime = eventStartTime + (long)(1.5 * bucketSpan.millis()); + events.add(new ScheduledEvent.Builder().description("Some Event") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(eventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(eventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + + postScheduledEvents(calendarId, events); + + // Update the job to be a member of the group + UpdateJobAction.Request jobUpdateRequest = new UpdateJobAction.Request(job.getId(), + new JobUpdate.Builder(job.getId()).setGroups(Collections.singletonList(groupName)).build()); + client().execute(UpdateJobAction.INSTANCE, jobUpdateRequest).actionGet(); + + // Wait until the notification that the job was updated is indexed + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + .setSize(1) + .addSort("timestamp", SortOrder.DESC) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("job_id", job.getId())) + .filter(QueryBuilders.termQuery("level", "info")) + ).get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertThat(hits.length, equalTo(1)); + assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Job updated: [groups]")); + }); + + // write some more buckets of data that cover the scheduled event period + postData(job.getId(), generateData(startTime + bucketCount * bucketSpan.millis(), bucketSpan, 5, + bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + // and close + closeJob(job.getId()); + + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(job.getId()); + List buckets = getBuckets(getBucketsRequest); + + // the first 6 buckets have no events + for (int i=0; i<=bucketCount; i++) { + assertEquals(0, buckets.get(i).getScheduledEvents().size()); + } + // 7th and 8th buckets have the event but the last one does not + assertEquals(1, buckets.get(6).getScheduledEvents().size()); + 
assertEquals("Some Event", buckets.get(6).getScheduledEvents().get(0)); + assertEquals(1, buckets.get(7).getScheduledEvents().size()); + assertEquals("Some Event", buckets.get(7).getScheduledEvents().get(0)); + assertEquals(0, buckets.get(8).getScheduledEvents().size()); + } + private Job.Builder createJob(String jobId, TimeValue bucketSpan) { Detector.Builder detector = new Detector.Builder("count", null); AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); From da6b61e8ef912fcb89c03e69f322ee517d879f06 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 17 Aug 2018 08:13:16 -0700 Subject: [PATCH 34/46] Make Geo Context Mapping Parsing More Strict (#32821) Currently, if geo context is represented by something other than geo_point or an object with lat and lon fields, the parsing of it as a geo context can result in ignoring the context altogether, returning confusing errors such as number_format_exception or trying to parse the number specifying as long-encoded hash code. It would also fail if the geo_point was stored. This commit makes the mapping parsing more strict and will fail during mapping update or index creation if the geo context doesn't point to a geo_point field. Supersedes #32412 Closes #32202 --- .../migration/migrate_7_0/search.asciidoc | 3 + .../index/mapper/MapperService.java | 3 + .../completion/context/ContextMapping.java | 29 +++++++++ .../completion/context/ContextMappings.java | 8 ++- .../completion/context/GeoContextMapping.java | 40 +++++++++++- .../ContextCompletionSuggestSearchIT.java | 20 +++++- .../completion/GeoContextMappingTests.java | 65 +++++++++++++++++++ 7 files changed, 161 insertions(+), 7 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 11f4650912723..094294d85304d 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -92,6 +92,9 @@ deprecated in 6.x, has been removed. Context enabled suggestion queries without contexts have to visit every suggestion, which degrades the search performance considerably. +For geo context the value of the `path` parameter is now validated against the mapping, +and the context is only accepted if `path` points to a field with `geo_point` type. 
+ ==== Semantics changed for `max_concurrent_shard_requests` `max_concurrent_shard_requests` used to limit the total number of concurrent shard diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 921e472c94ff1..9cd8ef1f6ac67 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.search.suggest.completion.context.ContextMapping; import java.io.Closeable; import java.io.IOException; @@ -421,6 +422,8 @@ private synchronized Map internalMerge(@Nullable Documen MapperMergeValidator.validateFieldReferences(fieldMappers, fieldAliasMappers, fullPathObjectMappers, fieldTypes); + ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get); + if (reason == MergeReason.MAPPING_UPDATE) { // this check will only be performed on the master node when there is // a call to the update mapping API. For all other cases like diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index 1aa82eeb2190a..0d0c7e9458910 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest.completion.context; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -28,6 +29,8 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.CompletionFieldMapper; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -35,6 +38,7 @@ import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.function.Function; /** * A {@link ContextMapping} defines criteria that can be used to @@ -131,6 +135,31 @@ public final List parseQueryContext(XContentParser parser) */ protected abstract XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException; + /** + * Checks if the current context is consistent with the rest of the fields. For example, the GeoContext + * should check that the field that it points to has the correct type. 
+ */ + protected void validateReferences(Version indexVersionCreated, Function fieldResolver) { + // No validation is required by default + } + + /** + * Verifies that all field paths specified in contexts point to the fields with correct mappings + */ + public static void validateContextPaths(Version indexVersionCreated, List fieldMappers, + Function fieldResolver) { + for (FieldMapper fieldMapper : fieldMappers) { + if (CompletionFieldMapper.CONTENT_TYPE.equals(fieldMapper.typeName())) { + CompletionFieldMapper.CompletionFieldType fieldType = ((CompletionFieldMapper) fieldMapper).fieldType(); + if (fieldType.hasContextMappings()) { + for (ContextMapping context : fieldType.getContextMappings()) { + context.validateReferences(indexVersionCreated, fieldResolver); + } + } + } + } + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(FIELD_NAME, name); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java index 3c0f0e80cebdb..b4c3276b946b2 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java @@ -37,6 +37,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -50,7 +51,7 @@ * and creates context queries for defined {@link ContextMapping}s * for a {@link CompletionFieldMapper} */ -public class ContextMappings implements ToXContent { +public class ContextMappings implements ToXContent, Iterable> { private final List> contextMappings; private final Map> contextNameMap; @@ -97,6 +98,11 @@ public void addField(ParseContext.Document document, String name, String input, document.add(new TypedContextField(name, input, weight, contexts, document)); } + @Override + public Iterator> iterator() { + return contextMappings.iterator(); + } + /** * Field prepends context values with a suggestion * Context values are associated with a type, denoted by diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index 48aaf705099da..938c4963620ed 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -19,12 +19,17 @@ package org.elasticsearch.search.suggest.completion.context; +import org.apache.logging.log4j.LogManager; +import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -42,6 +47,7 @@ import java.util.Map; import 
java.util.Objects; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.geo.GeoHashUtils.addNeighbors; @@ -69,6 +75,8 @@ public class GeoContextMapping extends ContextMapping { static final String CONTEXT_PRECISION = "precision"; static final String CONTEXT_NEIGHBOURS = "neighbours"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class)); + private final int precision; private final String fieldName; @@ -205,11 +213,11 @@ public Set parseContext(Document document) { for (IndexableField field : fields) { if (field instanceof StringField) { spare.resetFromString(field.stringValue()); - } else { - // todo return this to .stringValue() once LatLonPoint implements it + geohashes.add(spare.geohash()); + } else if (field instanceof LatLonPoint || field instanceof LatLonDocValuesField) { spare.resetFromIndexableField(field); + geohashes.add(spare.geohash()); } - geohashes.add(spare.geohash()); } } } @@ -279,6 +287,32 @@ public List toInternalQueryContexts(List return internalQueryContextList; } + @Override + protected void validateReferences(Version indexVersionCreated, Function fieldResolver) { + if (fieldName != null) { + MappedFieldType mappedFieldType = fieldResolver.apply(fieldName); + if (mappedFieldType == null) { + if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping", + "field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name); + } else { + throw new ElasticsearchParseException( + "field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name); + } + } else if (GeoPointFieldMapper.CONTENT_TYPE.equals(mappedFieldType.typeName()) == false) { + if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping", + "field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]", + fieldName, name, mappedFieldType.typeName()); + } else { + throw new ElasticsearchParseException( + "field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]", + fieldName, name, mappedFieldType.typeName()); + } + } + } + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index d95db778a6a3f..44c49ace5de8f 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -493,15 +493,24 @@ public void testGeoNeighbours() throws Exception { } public void testGeoField() throws Exception { -// Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5); -// Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder mapping = jsonBuilder(); mapping.startObject(); mapping.startObject(TYPE); mapping.startObject("properties"); + mapping.startObject("location"); + mapping.startObject("properties"); mapping.startObject("pin"); mapping.field("type", "geo_point"); + // Enable store and disable indexing sometimes + if (randomBoolean()) { + mapping.field("store", "true"); + } + if (randomBoolean()) { + 
mapping.field("index", "false"); + } + mapping.endObject(); // pin mapping.endObject(); + mapping.endObject(); // location mapping.startObject(FIELD); mapping.field("type", "completion"); mapping.field("analyzer", "simple"); @@ -510,7 +519,7 @@ public void testGeoField() throws Exception { mapping.startObject(); mapping.field("name", "st"); mapping.field("type", "geo"); - mapping.field("path", "pin"); + mapping.field("path", "location.pin"); mapping.field("precision", 5); mapping.endObject(); mapping.endArray(); @@ -524,7 +533,9 @@ public void testGeoField() throws Exception { XContentBuilder source1 = jsonBuilder() .startObject() + .startObject("location") .latlon("pin", 52.529172, 13.407333) + .endObject() .startObject(FIELD) .array("input", "Hotel Amsterdam in Berlin") .endObject() @@ -533,7 +544,9 @@ public void testGeoField() throws Exception { XContentBuilder source2 = jsonBuilder() .startObject() + .startObject("location") .latlon("pin", 52.363389, 4.888695) + .endObject() .startObject(FIELD) .array("input", "Hotel Berlin in Amsterdam") .endObject() @@ -600,6 +613,7 @@ public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBu private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException { createIndexAndMappingAndSettings(Settings.EMPTY, completionMappingBuilder); } + private void createIndexAndMappingAndSettings(Settings settings, CompletionMappingBuilder completionMappingBuilder) throws IOException { XContentBuilder mapping = jsonBuilder().startObject() .startObject(TYPE).startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java index 56ff157ec718d..a745384eb3edb 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest.completion; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -200,6 +201,70 @@ public void testIndexingWithMultipleContexts() throws Exception { assertContextSuggestFields(fields, 3); } + public void testMalformedGeoField() throws Exception { + XContentBuilder mapping = jsonBuilder(); + mapping.startObject(); + mapping.startObject("type1"); + mapping.startObject("properties"); + mapping.startObject("pin"); + String type = randomFrom("text", "keyword", "long"); + mapping.field("type", type); + mapping.endObject(); + mapping.startObject("suggestion"); + mapping.field("type", "completion"); + mapping.field("analyzer", "simple"); + + mapping.startArray("contexts"); + mapping.startObject(); + mapping.field("name", "st"); + mapping.field("type", "geo"); + mapping.field("path", "pin"); + mapping.field("precision", 5); + mapping.endObject(); + mapping.endArray(); + + mapping.endObject(); + + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + + ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, + () -> createIndex("test", Settings.EMPTY, "type1", mapping)); + + assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] must be mapped to geo_point, found [" + type 
+ "]")); + } + + public void testMissingGeoField() throws Exception { + XContentBuilder mapping = jsonBuilder(); + mapping.startObject(); + mapping.startObject("type1"); + mapping.startObject("properties"); + mapping.startObject("suggestion"); + mapping.field("type", "completion"); + mapping.field("analyzer", "simple"); + + mapping.startArray("contexts"); + mapping.startObject(); + mapping.field("name", "st"); + mapping.field("type", "geo"); + mapping.field("path", "pin"); + mapping.field("precision", 5); + mapping.endObject(); + mapping.endArray(); + + mapping.endObject(); + + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + + ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, + () -> createIndex("test", Settings.EMPTY, "type1", mapping)); + + assertThat(ex.getMessage(), equalTo("field [pin] referenced in context [st] is not defined in the mapping")); + } + public void testParsingQueryContextBasic() throws Exception { XContentBuilder builder = jsonBuilder().value("ezs42e44yx96"); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); From e3aa68b0a9f72db7da5ec1fbba48da87edad6c7d Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Fri, 17 Aug 2018 18:23:13 +0300 Subject: [PATCH 35/46] [TEST] Run pre 6.4 nodes in non-FIPS JVMs (#32901) Elasticsearch versions earlier than 6.4.0 cannot properly run in a FIPS 140 JVM. This commit ensures that we use a non-FIPS JVM for nodes that we spin up in BWC tests even when we're testing FIPS. --- .../groovy/org/elasticsearch/gradle/test/NodeInfo.groovy | 6 ++++++ .../qa/full-cluster-restart/with-system-key/build.gradle | 8 -------- x-pack/qa/rolling-upgrade/with-system-key/build.gradle | 9 --------- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 0dd56b863324f..aaf4e468182a9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -177,6 +177,12 @@ class NodeInfo { javaVersion = 8 } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) { javaVersion = 9 + } else if (project.inFipsJvm && nodeVersion.onOrAfter("6.3.0") && nodeVersion.before("6.4.0")) { + /* + * Elasticsearch versions before 6.4.0 cannot be run in a FIPS-140 JVM. If we're running + * bwc tests in a FIPS-140 JVM, ensure that the pre v6.4.0 nodes use a Java 10 JVM instead. 
+ */ + javaVersion = 10 } args.addAll("-E", "node.portsfile=true") diff --git a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle index 928280b6584bd..e69de29bb2d1d 100644 --- a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle +++ b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle @@ -1,8 +0,0 @@ -import org.elasticsearch.gradle.test.RestIntegTestTask - -// Skip test on FIPS FIXME https://github.com/elastic/elasticsearch/issues/32737 -if (project.inFipsJvm) { - tasks.withType(RestIntegTestTask) { - enabled = false - } -} diff --git a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle index 5aaa1ed1eff9b..03505e01dedd8 100644 --- a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle +++ b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle @@ -1,10 +1 @@ -import org.elasticsearch.gradle.test.RestIntegTestTask - -// Skip test on FIPS FIXME https://github.com/elastic/elasticsearch/issues/32737 -if (project.inFipsJvm) { - tasks.withType(RestIntegTestTask) { - enabled = false - } -} - group = "${group}.x-pack.qa.rolling-upgrade.with-system-key" From c5de9ec79d4e60714761765d4581de863ed97f64 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 17 Aug 2018 09:18:08 -0700 Subject: [PATCH 36/46] [DOCS] Splits the roles API documentation into multiple pages (#32794) --- x-pack/docs/build.gradle | 14 ++ x-pack/docs/en/rest-api/security.asciidoc | 10 +- .../security/clear-roles-cache.asciidoc | 39 ++++ .../rest-api/security/create-roles.asciidoc | 102 +++++++++ .../rest-api/security/delete-roles.asciidoc | 53 +++++ .../en/rest-api/security/get-roles.asciidoc | 85 +++++++ .../docs/en/rest-api/security/roles.asciidoc | 208 +----------------- .../xpack.security.clear_cached_roles.json | 2 +- .../api/xpack.security.delete_role.json | 2 +- .../api/xpack.security.get_role.json | 2 +- .../api/xpack.security.put_role.json | 2 +- 11 files changed, 311 insertions(+), 208 deletions(-) create mode 100644 x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/create-roles.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/delete-roles.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/get-roles.asciidoc diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 70f6061985a6a..48ac4ba565e55 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -722,3 +722,17 @@ setups['sensor_prefab_data'] = ''' {"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} ''' +setups['admin_role'] = ''' + - do: + xpack.security.put_role: + name: "my_admin_role" + body: > + { + "cluster": ["all"], + "indices": [ + {"names": ["index1", "index2" ], "privileges": ["all"], "field_security" : {"grant" : [ "title", "body" ]}} + ], + "run_as": [ "other_user" ], + "metadata" : {"version": 1} + } +''' diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 
227e343192a50..4d59cb22daa06 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -2,20 +2,26 @@ [[security-api]] == Security APIs +You can use the following APIs to perform {security} activities. + * <> * <> * <> -* <> * <> * <> * <> * <> +include::security/roles.asciidoc[] + include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] +include::security/clear-roles-cache.asciidoc[] +include::security/create-roles.asciidoc[] +include::security/delete-roles.asciidoc[] +include::security/get-roles.asciidoc[] include::security/privileges.asciidoc[] -include::security/roles.asciidoc[] include::security/role-mapping.asciidoc[] include::security/ssl.asciidoc[] include::security/tokens.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc new file mode 100644 index 0000000000000..591d7eb2d11e4 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc @@ -0,0 +1,39 @@ +[role="xpack"] +[[security-api-clear-role-cache]] +=== Clear roles cache API + +Evicts roles from the native role cache. + +==== Request + +`POST /_xpack/security/role//_clear_cache` + +==== Description + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`name`:: + (string) The name of the role. + + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The clear roles cache API evicts roles from the native role cache. For example, +to clear the cache for `my_admin_role`: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/my_admin_role/_clear_cache +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc new file mode 100644 index 0000000000000..749676b4e8360 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -0,0 +1,102 @@ +[role="xpack"] +[[security-api-put-role]] +=== Create roles API + +Adds roles in the native realm. + +==== Request + +`POST /_xpack/security/role/` + + +`PUT /_xpack/security/role/` + + +==== Description + +The role API is generally the preferred way to manage roles, rather than using +file-based role management. For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + + +==== Path Parameters + +`name`:: + (string) The name of the role. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role: + +`cluster`:: (list) A list of cluster privileges. These privileges define the +cluster level actions that users with this role are able to execute. + +`indices`:: (list) A list of indices permissions entries. +`field_security`::: (list) The document fields that the owners of the role have +read access to. For more information, see +{stack-ov}/field-and-document-access-control.html[Setting up field and document level security]. +`names` (required)::: (list) A list of indices (or index name patterns) to which the +permissions in this entry apply. +`privileges`(required)::: (list) The index level privileges that the owners of the role +have on the specified indices. 
+`query`::: A search query that defines the documents the owners of the role have +read access to. A document within the specified indices must match this query in +order for it to be accessible by the owners of the role. + +`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + +`run_as`:: (list) A list of users that the owners of this role can impersonate. +For more information, see +{stack-ov}/run-as-privilege.html[Submitting requests on behalf of other users]. + +For more information, see {stack-ov}/defining-roles.html[Defining roles]. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The following example adds a role called `my_admin_role`: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/my_admin_role +{ + "cluster": ["all"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": ["all"], + "field_security" : { // optional + "grant" : [ "title", "body" ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" // optional + } + ], + "run_as": [ "other_user" ], // optional + "metadata" : { // optional + "version" : 1 + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the role has been +created or updated. + +[source,js] +-------------------------------------------------- +{ + "role": { + "created": true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing role is updated, `created` is set to false. diff --git a/x-pack/docs/en/rest-api/security/delete-roles.asciidoc b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc new file mode 100644 index 0000000000000..db42493ca0fb6 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc @@ -0,0 +1,53 @@ +[role="xpack"] +[[security-api-delete-role]] +=== Delete roles API + +Removes roles in the native realm. + +==== Request + +`DELETE /_xpack/security/role/` + + +==== Description + +The Roles API is generally the preferred way to manage roles, rather than using +file-based role management. For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + + +==== Path Parameters + +`name`:: + (string) The name of the role. + +//==== Request Body + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +The following example deletes a `my_admin_role` role: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role/my_admin_role +-------------------------------------------------- +// CONSOLE +// TEST[setup:admin_role] + +If the role is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE + diff --git a/x-pack/docs/en/rest-api/security/get-roles.asciidoc b/x-pack/docs/en/rest-api/security/get-roles.asciidoc new file mode 100644 index 0000000000000..fa6e91b519b6f --- /dev/null +++ b/x-pack/docs/en/rest-api/security/get-roles.asciidoc @@ -0,0 +1,85 @@ +[role="xpack"] +[[security-api-get-role]] +=== Get roles API + +Retrieves roles in the native realm. 
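+
+For illustration, a role lookup can also be issued from Java. The sketch below
+is illustrative only: it assumes the 6.x low-level Java REST client, omits
+authentication setup, and the role names are hypothetical.
+
+[source,java]
+--------------------------------------------------
+import org.apache.http.HttpHost;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class GetRolesSketch {
+    public static void main(String[] args) throws Exception {
+        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
+            // Several roles can be fetched at once as a comma-separated list;
+            // omitting the names entirely fetches every role.
+            Request request = new Request("GET", "/_xpack/security/role/r1,r2,my_admin_role");
+            Response response = client.performRequest(request);
+            System.out.println(EntityUtils.toString(response.getEntity()));
+        }
+    }
+}
+--------------------------------------------------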
+
+==== Request
+
+`GET /_xpack/security/role` +
+
+`GET /_xpack/security/role/`
+
+
+==== Description
+
+For more information about the native realm, see
+{stack-ov}/realms.html[Realms] and <>.
+
+==== Path Parameters
+
+`name`::
+  (string) The name of the role. You can specify multiple roles as a
+  comma-separated list. If you do not specify this parameter, the API
+  returns information about all roles.
+
+//==== Request Body
+
+==== Authorization
+
+To use this API, you must have at least the `manage_security` cluster
+privilege.
+
+
+==== Examples
+
+The following example retrieves information about the `my_admin_role` role in
+the native realm:
+
+[source,js]
+--------------------------------------------------
+GET /_xpack/security/role/my_admin_role
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:admin_role]
+
+A successful call returns the JSON representation of the role. If the role is
+not defined in the native realm, the request returns 404.
+
+[source,js]
+--------------------------------------------------
+{
+  "my_admin_role": {
+    "cluster" : [ "all" ],
+    "indices" : [
+      {
+        "names" : [ "index1", "index2" ],
+        "privileges" : [ "all" ],
+        "field_security" : {
+          "grant" : [ "title", "body" ]}
+      }
+    ],
+    "applications" : [ ],
+    "run_as" : [ "other_user" ],
+    "metadata" : {
+      "version" : 1
+    },
+    "transient_metadata": {
+      "enabled": true
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+To retrieve all roles, omit the role name:
+
+[source,js]
+--------------------------------------------------
+GET /_xpack/security/role
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+NOTE: If a single role is requested, that role is returned as the response. When
+requesting multiple roles, an object is returned holding the found roles, each
+keyed by the relevant role name.
diff --git a/x-pack/docs/en/rest-api/security/roles.asciidoc b/x-pack/docs/en/rest-api/security/roles.asciidoc
index 28c09c560ec3c..3853963081e39 100644
--- a/x-pack/docs/en/rest-api/security/roles.asciidoc
+++ b/x-pack/docs/en/rest-api/security/roles.asciidoc
@@ -1,205 +1,9 @@
-[role="xpack"]
+[float]
 [[security-api-roles]]
-=== Role Management APIs
+=== Roles
 
-The Roles API enables you to add, remove, and retrieve roles in the `native`
-realm.
+You can use the following APIs to add, remove, and retrieve roles in the native realm:
 
-==== Request
-
-`GET /_xpack/security/role` +
-
-`GET /_xpack/security/role/` +
-
-`DELETE /_xpack/security/role/` +
-
-`POST /_xpack/security/role//_clear_cache` +
-
-`POST /_xpack/security/role/` +
-
-`PUT /_xpack/security/role/`
-
-
-==== Description
-
-The Roles API is generally the preferred way to manage roles, rather than using
-file-based role management. For more information, see
-{xpack-ref}/authorization.html[Configuring Role-based Access Control].
-
-
-==== Path Parameters
-
-`name`::
-  (string) The name of the role. If you do not specify this parameter, the
-  Get Roles API returns information about all roles.
-
-
-==== Request Body
-
-The following parameters can be specified in the body of a PUT or POST request
-and pertain to adding a role:
-
-`cluster`:: (list) A list of cluster privileges. These privileges define the
-cluster level actions that users with this role are able to execute.
-
-`indices`:: (list) A list of indices permissions entries.
-`field_security`::: (list) The document fields that the owners of the role have
-read access to.
For more information, see -{xpack-ref}/field-and-document-access-control.html[Setting Up Field and Document Level Security]. -`names` (required)::: (list) A list of indices (or index name patterns) to which the -permissions in this entry apply. -`privileges`(required)::: (list) The index level privileges that the owners of the role -have on the specified indices. -`query`::: A search query that defines the documents the owners of the role have -read access to. A document within the specified indices must match this query in -order for it to be accessible by the owners of the role. - -`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys -that begin with `_` are reserved for system usage. - -`run_as`:: (list) A list of users that the owners of this role can impersonate. -For more information, see -{xpack-ref}/run-as-privilege.html[Submitting Requests on Behalf of Other Users]. - -For more information, see {xpack-ref}/defining-roles.html[Defining Roles]. - - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster -privilege. - - -==== Examples - -[[security-api-put-role]] -To add a role, submit a PUT or POST request to the `/_xpack/security/role/` -endpoint: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role/my_admin_role -{ - "cluster": ["all"], - "indices": [ - { - "names": [ "index1", "index2" ], - "privileges": ["all"], - "field_security" : { // optional - "grant" : [ "title", "body" ] - }, - "query": "{\"match\": {\"title\": \"foo\"}}" // optional - } - ], - "run_as": [ "other_user" ], // optional - "metadata" : { // optional - "version" : 1 - } -} --------------------------------------------------- -// CONSOLE - -A successful call returns a JSON structure that shows whether the role has been -created or updated. - -[source,js] --------------------------------------------------- -{ - "role": { - "created": true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing role is updated, `created` is set to false. - -[[security-api-get-role]] -To retrieve a role from the `native` Security realm, issue a GET request to the -`/_xpack/security/role/` endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/role/my_admin_role --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call returns an array of roles with the JSON representation of the -role. If the role is not defined in the `native` realm, the request 404s. - -[source,js] --------------------------------------------------- -{ - "my_admin_role": { - "cluster" : [ "all" ], - "indices" : [ { - "names" : [ "index1", "index2" ], - "privileges" : [ "all" ], - "field_security" : { - "grant" : [ "title", "body" ] - }, - "query" : "{\"match\": {\"title\": \"foo\"}}" - } ], - "applications" : [ ], - "run_as" : [ "other_user" ], - "metadata" : { - "version" : 1 - }, - "transient_metadata": { - "enabled": true - } - } -} --------------------------------------------------- -// TESTRESPONSE - -You can specify multiple roles as a comma-separated list. To retrieve all roles, -omit the role name. 
- -[source,js] --------------------------------------------------- -# Retrieve roles "r1", "r2", and "my_admin_role" -GET /_xpack/security/role/r1,r2,my_admin_role - -# Retrieve all roles -GET /_xpack/security/role --------------------------------------------------- -// CONSOLE -// TEST[continued] - -NOTE: If single role is requested, that role is returned as the response. When -requesting multiple roles, an object is returned holding the found roles, each -keyed by the relevant role name. - -[[security-api-delete-role]] -To delete a role, submit a DELETE request to the `/_xpack/security/role/` -endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/role/my_admin_role --------------------------------------------------- -// CONSOLE -// TEST[continued] - -If the role is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. - -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE - -[[security-api-clear-role-cache]] -The Clear Roles Cache API evicts roles from the native role cache. To clear the -cache for a role, submit a POST request `/_xpack/security/role//_clear_cache` -endpoint: - -[source,js] --------------------------------------------------- -POST /_xpack/security/role/my_admin_role/_clear_cache --------------------------------------------------- -// CONSOLE +* <>, <> +* <> +* <> \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json index c94333325b127..d945ebe3247e0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json @@ -1,6 +1,6 @@ { "xpack.security.clear_cached_roles": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-clear-role-cache", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html", "methods": [ "POST" ], "url": { "path": "/_xpack/security/role/{name}/_clear_cache", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json index 4351b1bc847a1..881105d60b8b3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json @@ -1,6 +1,6 @@ { "xpack.security.delete_role": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-delete-role", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/role/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json index 3479c911ccdce..67bdbb8a911a2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json @@ -1,6 +1,6 @@ { 
"xpack.security.get_role": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-get-role", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/role/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json index 4152975189e24..63ef5ee37867c 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json @@ -1,6 +1,6 @@ { "xpack.security.put_role": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-put-role", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/role/{name}", From 46c35db1dfb7fa59f47a420545ab76f342ad701d Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 17 Aug 2018 17:52:29 +0100 Subject: [PATCH 37/46] [ML][TEST] Fix BasicRenormalizationIT after adding multibucket feature As the multibucket feature was merged in, this test hit a side effect which means buckets trailing an anomaly could become anomalous. This commit fixes the problem by filtering low score records when we request them. --- .../xpack/ml/integration/BasicRenormalizationIT.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java index 80afdeff82ad8..cc5a9f4f1b469 100644 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java +++ b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; @@ -36,7 +37,11 @@ public void testDefaultRenormalization() throws Exception { String jobId = "basic-renormalization-it-test-default-renormalization-job"; createAndRunJob(jobId, null); - List records = getRecords(jobId); + GetRecordsAction.Request getRecordsRequest = new GetRecordsAction.Request(jobId); + // Setting the record score to 10.0, to avoid the low score records due to multibucket trailing effect + getRecordsRequest.setRecordScore(10.0); + + List records = getRecords(getRecordsRequest); assertThat(records.size(), equalTo(2)); AnomalyRecord laterRecord = records.get(0); assertThat(laterRecord.getActual().get(0), equalTo(100.0)); From a608205510519c3ec7a8554c1f8f16744a6ccfce Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 17 Aug 2018 10:20:28 -0700 Subject: [PATCH 38/46] [DOCS] Fixes links to role management APIs --- x-pack/docs/en/security/authorization/managing-roles.asciidoc | 2 +- 
x-pack/docs/en/security/authorization/mapping-roles.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index b8a01aa4519c6..aa872a88ce0a4 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -130,7 +130,7 @@ manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. The _Role Management APIs_ enable you to add, update, remove and retrieve roles dynamically. When you use the APIs to manage roles in the `native` realm, the roles are stored in an internal {es} index. For more information and examples, -see {ref}/security-api-roles.html[Role Management APIs]. +see {ref}/security-api.html#security-api-roles.html[role management APIs]. [float] [[roles-management-file]] diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index cf8373a65f335..9626571dbd5b4 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -18,7 +18,7 @@ the API, and other roles that are mapped through files. When you use role-mappings, you assign existing roles to users. The available roles should either be added using the -{ref}/security-api-roles.html[Role Management APIs] or defined in the +{ref}/security-api.html#security-api-roles.html[role management APIs] or defined in the <>. Either role-mapping method can use either role management method. For example, when you use the role mapping API, you are able to map users to both API-managed roles and file-managed roles From 899e94a29bc8a4e7a9497caf12e5ec153d8f8870 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Fri, 17 Aug 2018 13:33:12 -0400 Subject: [PATCH 39/46] [Docs] Tweaks and fixes to rollup docs - Missing links to new IndexCaps API - Incorrect security permissions on IndexCaps API - GetJobs API must supply a job (or `_all`), omitting throws error - Link to search/agg limitations from RollupSearch API - Tweak URLs in quick reference - Formatting of overview page --- x-pack/docs/en/rest-api/rollup-api.asciidoc | 2 ++ .../docs/en/rest-api/rollup/rollup-caps.asciidoc | 4 ++-- .../en/rest-api/rollup/rollup-index-caps.asciidoc | 4 +--- .../en/rest-api/rollup/rollup-job-config.asciidoc | 6 ++++++ .../en/rest-api/rollup/rollup-search.asciidoc | 2 +- x-pack/docs/en/rollup/api-quickref.asciidoc | 15 ++++++++------- x-pack/docs/en/rollup/overview.asciidoc | 4 ++++ 7 files changed, 24 insertions(+), 13 deletions(-) diff --git a/x-pack/docs/en/rest-api/rollup-api.asciidoc b/x-pack/docs/en/rest-api/rollup-api.asciidoc index f1cd7c285a733..9a8ec00d77a0c 100644 --- a/x-pack/docs/en/rest-api/rollup-api.asciidoc +++ b/x-pack/docs/en/rest-api/rollup-api.asciidoc @@ -16,6 +16,7 @@ === Data * <> +* <> [float] [[rollup-search-endpoint]] @@ -31,5 +32,6 @@ include::rollup/put-job.asciidoc[] include::rollup/start-job.asciidoc[] include::rollup/stop-job.asciidoc[] include::rollup/rollup-caps.asciidoc[] +include::rollup/rollup-index-caps.asciidoc[] include::rollup/rollup-search.asciidoc[] include::rollup/rollup-job-config.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc index f770adf1f0d1c..1f233f195a09e 100644 --- 
a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc
+++ b/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc
@@ -27,8 +27,8 @@ live?
 ==== Path Parameters
 
 `index`::
-  (string) Index, indices or index-pattern to return rollup capabilities for. If omitted (or `_all` is used) all available
-  rollup job capabilities will be returned
+  (string) Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch
+  rollup capabilities from all jobs.
 
 ==== Request Body
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc
index 4636d9775e9d3..e5ca70cd59cda 100644
--- a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc
+++ b/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc
@@ -26,15 +26,13 @@ This API will allow you to determine:
 `index`::
   (string) Index or index-pattern of concrete rollup indices to check for capabilities.
 
-
-
 ==== Request Body
 
 There is no request body for the Get Jobs API.
 
 ==== Authorization
 
-You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API.
+You must have the `read` index privilege on the index that stores the rollup results.
 For more information, see
 {xpack-ref}/security-privileges.html[Security Privileges].
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
index ef0ea6f00f7ce..2ba92b6b59ea6 100644
--- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
+++ b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
@@ -82,6 +82,12 @@ In the above example, there are several pieces of logistical configuration for t
 will tend to execute faster, but will require more memory during processing. This has no effect on how the data
 is rolled up, it is merely used for tweaking the speed/memory cost of the indexer.
 
+[NOTE]
+The `index_pattern` cannot be a pattern that would also match the destination `rollup_index`. For example, the pattern
+`"foo-*"` would match the rollup index `"foo-rollup"`. This causes problems because the rollup job would attempt
+to roll up its own data at runtime. If you attempt to configure a pattern that matches the `rollup_index`, an exception
+will be thrown to prevent this behavior.
+
 [[rollup-groups-config]]
 ==== Grouping Config
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc
index 470cbc4eaf57d..f595d52ec10a1 100644
--- a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc
+++ b/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc
@@ -34,7 +34,7 @@ or using `_all`, is not permitted
 The request body supports a subset of features from the regular Search API.
It supports:
 
-- `query` param for specifying an DSL query, subject to some limitations
+- `query` param for specifying a DSL query, subject to some limitations (see <> and <>)
 - `aggregations` param for specifying aggregations
 
 Functionality that is not available:
diff --git a/x-pack/docs/en/rollup/api-quickref.asciidoc b/x-pack/docs/en/rollup/api-quickref.asciidoc
index 937c6a84e5e14..5e99f1c69841c 100644
--- a/x-pack/docs/en/rollup/api-quickref.asciidoc
+++ b/x-pack/docs/en/rollup/api-quickref.asciidoc
@@ -15,18 +15,19 @@ Most {rollup} endpoints have the following base:
 [[rollup-api-jobs]]
 === /job/
 
-* {ref}/rollup-put-job.html[PUT /job/+++]: Create a job
-* {ref}/rollup-get-job.html[GET /job]: List jobs
-* {ref}/rollup-get-job.html[GET /job/+++]: Get job details
-* {ref}/rollup-start-job.html[POST /job//_start]: Start a job
-* {ref}/rollup-stop-job.html[POST /job//_stop]: Stop a job
-* {ref}/rollup-delete-job.html[DELETE /job/+++]: Delete a job
+* {ref}/rollup-put-job.html[PUT /_xpack/rollup/job/+++]: Create a job
+* {ref}/rollup-get-job.html[GET /_xpack/rollup/job]: List jobs
+* {ref}/rollup-get-job.html[GET /_xpack/rollup/job/+++]: Get job details
+* {ref}/rollup-start-job.html[POST /_xpack/rollup/job//_start]: Start a job
+* {ref}/rollup-stop-job.html[POST /_xpack/rollup/job//_stop]: Stop a job
+* {ref}/rollup-delete-job.html[DELETE /_xpack/rollup/job/+++]: Delete a job
 
 [float]
 [[rollup-api-data]]
 === /data/
 
-* {ref}/rollup-get-rollup-caps.html[GET /data//_rollup_caps+++]: Get Rollup Capabilities
+* {ref}/rollup-get-rollup-caps.html[GET /_xpack/rollup/data//_rollup_caps+++]: Get Rollup Capabilities
+* {ref}/rollup-get-rollup-index-caps.html[GET //_rollup/data/+++]: Get Rollup Index Capabilities
 
 [float]
 [[rollup-api-index]]
diff --git a/x-pack/docs/en/rollup/overview.asciidoc b/x-pack/docs/en/rollup/overview.asciidoc
index a3f29f23bd107..a9a983fbecc1d 100644
--- a/x-pack/docs/en/rollup/overview.asciidoc
+++ b/x-pack/docs/en/rollup/overview.asciidoc
@@ -20,6 +20,7 @@ So while the cost of storing a millisecond of sensor data from ten years ago is
 reading often diminishes with time. It's not useless -- it could easily contribute to a useful analysis -- but it's reduced
 value often leads to deletion rather than paying the fixed storage cost.
 
+[float]
 === Rollup store historical data at reduced granularity
 
 That's where Rollup comes into play. The Rollup functionality summarizes old, high-granularity data into a reduced
@@ -35,6 +36,7 @@ automates this process of summarizing historical data.
 
 Details about setting up and configuring Rollup are covered in <>
 
+[float]
 === Rollup uses standard query DSL
 
 The Rollup feature exposes a new search endpoint (`/_rollup_search` vs the standard `/_search`) which knows how to search
@@ -48,6 +50,7 @@ are covered more in <>.
 
 But if your queries, aggregations and dashboards only use the available functionality, redirecting them to historical
 data is trivial.
 
+[float]
 === Rollup merges "live" and "rolled" data
 
 A useful feature of Rollup is the ability to query both "live", realtime data in addition to historical "rolled" data
@@ -61,6 +64,7 @@ would only see data older than a month. The RollupSearch endpoint, however, sup
 It will take the results from both data sources and merge them together. If there is overlap between the "live" and
 "rolled" data, live data is preferred to increase accuracy.
 
+[float]
 === Rollup is multi-interval aware
 
 Finally, Rollup is capable of intelligently utilizing the best interval available.
If you've worked with summarizing From 967b1785fab89fc735791e64ec23cbd993a5e989 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 17 Aug 2018 10:41:06 -0700 Subject: [PATCH 40/46] [DOCS] Fixes more broken links to role management APIs --- x-pack/docs/en/security/authorization/managing-roles.asciidoc | 2 +- x-pack/docs/en/security/authorization/mapping-roles.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index aa872a88ce0a4..a7887ee664874 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -130,7 +130,7 @@ manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. The _Role Management APIs_ enable you to add, update, remove and retrieve roles dynamically. When you use the APIs to manage roles in the `native` realm, the roles are stored in an internal {es} index. For more information and examples, -see {ref}/security-api.html#security-api-roles.html[role management APIs]. +see {ref}/security-api.html#security-api-roles[role management APIs]. [float] [[roles-management-file]] diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index 9626571dbd5b4..1c6482510f0f3 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -18,7 +18,7 @@ the API, and other roles that are mapped through files. When you use role-mappings, you assign existing roles to users. The available roles should either be added using the -{ref}/security-api.html#security-api-roles.html[role management APIs] or defined in the +{ref}/security-api.html#security-api-roles[role management APIs] or defined in the <>. Either role-mapping method can use either role management method. 
For example, when you use the role mapping API, you are able to map users to both API-managed roles and file-managed roles From 86ffce4bbc93eadb8b3b505ef48b8bf0897cb69b Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 17 Aug 2018 13:55:42 -0400 Subject: [PATCH 41/46] TEST: Mute testRetentionPolicyChangeDuringRecovery Tracked at #32089 --- .../java/org/elasticsearch/indices/recovery/RecoveryTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 7ca9dcc82f70a..0f663eca75d73 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.HashMap; import java.util.List; @@ -74,7 +73,7 @@ public void testTranslogHistoryTransferred() throws Exception { } } - @TestLogging("_root:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32089") public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); From 647705e00a118bbafb3258d451beeee9a26c90bc Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 17 Aug 2018 15:30:31 -0500 Subject: [PATCH 42/46] Bypassing failing test PainlessDomainSplitIT#testHRDSplit (#32966) --- .../elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java index 79e9a81831fc8..0751d7307ae9a 100644 --- a/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java +++ b/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -240,6 +240,7 @@ public void testIsolated() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32966") public void testHRDSplit() throws Exception { // Create job From 1efee66d164d1d6996bae95ab7c9f4cca9c98d5e Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 17 Aug 2018 21:39:21 -0700 Subject: [PATCH 43/46] [DOCS] Creates redirects for role management APIs page --- docs/reference/redirects.asciidoc | 9 +++++++++ x-pack/docs/en/rest-api/security.asciidoc | 10 +++++++++- x-pack/docs/en/rest-api/security/role-mapping.asciidoc | 2 +- x-pack/docs/en/rest-api/security/roles.asciidoc | 9 --------- .../en/security/authorization/managing-roles.asciidoc | 2 +- .../en/security/authorization/mapping-roles.asciidoc | 2 +- 6 files changed, 21 insertions(+), 13 deletions(-) delete mode 100644 x-pack/docs/en/rest-api/security/roles.asciidoc diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index a5a8e4d008ac4..2d11d21089050 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -503,3 +503,12 @@ guide to the {painless}/index.html[Painless Scripting Language]. 
See the {painless}/painless-api-reference.html[Painless API Reference] in the guide to the {painless}/index.html[Painless Scripting Language]. + +[role="exclude", id="security-api-roles"] +=== Role management APIs + +You can use the following APIs to add, remove, and retrieve roles in the native realm: + +* <>, <> +* <> +* <> \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 4d59cb22daa06..476c9b95bfda3 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -12,7 +12,15 @@ You can use the following APIs to perform {security} activities. * <> * <> -include::security/roles.asciidoc[] +[float] +[[security-role-apis]] +=== Roles + +You can use the following APIs to add, remove, and retrieve roles in the native realm: + +* <>, <> +* <> +* <> include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc index 3844e30c62dc0..c8006346d4e8f 100644 --- a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc +++ b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc @@ -22,7 +22,7 @@ Role mappings have _rules_ that identify users and a list of _roles_ that are granted to those users. NOTE: This API does not create roles. Rather, it maps users to existing roles. -Roles can be created by using <> or +Roles can be created by using <> or {xpack-ref}/defining-roles.html#roles-management-file[roles files]. The role mapping rule is a logical condition that is expressed using a JSON DSL. diff --git a/x-pack/docs/en/rest-api/security/roles.asciidoc b/x-pack/docs/en/rest-api/security/roles.asciidoc deleted file mode 100644 index 3853963081e39..0000000000000 --- a/x-pack/docs/en/rest-api/security/roles.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -[float] -[[security-api-roles]] -=== Roles - -You can use the following APIs to add, remove, and retrieve roles in the native realm: - -* <>, <> -* <> -* <> \ No newline at end of file diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index a7887ee664874..f550c900edce0 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -130,7 +130,7 @@ manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. The _Role Management APIs_ enable you to add, update, remove and retrieve roles dynamically. When you use the APIs to manage roles in the `native` realm, the roles are stored in an internal {es} index. For more information and examples, -see {ref}/security-api.html#security-api-roles[role management APIs]. +see {ref}/security-api.html#security-role-apis[role management APIs]. [float] [[roles-management-file]] diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index 1c6482510f0f3..36f3a1f27f346 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -18,7 +18,7 @@ the API, and other roles that are mapped through files. When you use role-mappings, you assign existing roles to users. 
The available roles should either be added using the -{ref}/security-api.html#security-api-roles[role management APIs] or defined in the +{ref}/security-api.html#security-role-apis[role management APIs] or defined in the <>. Either role-mapping method can use either role management method. For example, when you use the role mapping API, you are able to map users to both API-managed roles and file-managed roles From fb1c3990d72aa1b2272db9e37c06dfc7a422da77 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 17 Aug 2018 22:22:09 -0700 Subject: [PATCH 44/46] [DOCS] Splits the token APIs into separate pages (#32865) --- docs/reference/redirects.asciidoc | 10 ++- x-pack/docs/en/rest-api/security.asciidoc | 13 ++- .../rest-api/security/delete-tokens.asciidoc | 54 ++++++++++++ .../{tokens.asciidoc => get-tokens.asciidoc} | 86 ++++++------------- .../api/xpack.security.get_token.json | 2 +- .../api/xpack.security.invalidate_token.json | 2 +- 6 files changed, 104 insertions(+), 63 deletions(-) create mode 100644 x-pack/docs/en/rest-api/security/delete-tokens.asciidoc rename x-pack/docs/en/rest-api/security/{tokens.asciidoc => get-tokens.asciidoc} (62%) diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 2d11d21089050..948652c37e69a 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -511,4 +511,12 @@ You can use the following APIs to add, remove, and retrieve roles in the native * <>, <> * <> -* <> \ No newline at end of file +* <> + +[role="exclude",id="security-api-tokens"] +=== Token management APIs + +You can use the following APIs to create and invalidate bearer tokens for access +without requiring basic authentication: + +* <>, <> diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 476c9b95bfda3..27e54df38b31c 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -9,7 +9,6 @@ You can use the following APIs to perform {security} activities. * <> * <> * <> -* <> * <> [float] @@ -22,15 +21,25 @@ You can use the following APIs to add, remove, and retrieve roles in the native * <> * <> +[float] +[[security-token-apis]] +=== Tokens + +You can use the following APIs to create and invalidate bearer tokens for access +without requiring basic authentication: + +* <>, <> + include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/create-roles.asciidoc[] include::security/delete-roles.asciidoc[] +include::security/delete-tokens.asciidoc[] include::security/get-roles.asciidoc[] +include::security/get-tokens.asciidoc[] include::security/privileges.asciidoc[] include::security/role-mapping.asciidoc[] include::security/ssl.asciidoc[] -include::security/tokens.asciidoc[] include::security/users.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc b/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc new file mode 100644 index 0000000000000..7d6bae2a4c40f --- /dev/null +++ b/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc @@ -0,0 +1,54 @@ +[role="xpack"] +[[security-api-invalidate-token]] +=== Delete token API + +Invalidates a bearer token for access without requiring basic authentication. 
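+
+For illustration, the invalidation call can also be issued from Java. The
+sketch below is illustrative only: it assumes the 6.x low-level Java REST
+client, omits authentication setup, and takes the token to revoke as a
+program argument.
+
+[source,java]
+--------------------------------------------------
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RestClient;
+
+public class InvalidateTokenSketch {
+    public static void main(String[] args) throws Exception {
+        String accessToken = args[0]; // the token to revoke
+        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
+            // DELETE with a JSON body naming the token, as documented below.
+            Request request = new Request("DELETE", "/_xpack/security/oauth2/token");
+            request.setJsonEntity("{ \"token\" : \"" + accessToken + "\" }");
+            client.performRequest(request);
+        }
+    }
+}
+--------------------------------------------------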
+ +==== Request + +`DELETE /_xpack/security/oauth2/token` + +==== Description + +The tokens returned by the <> have a +finite period of time for which they are valid and after that time period, they +can no longer be used. That time period is defined by the +`xpack.security.authc.token.timeout` setting. For more information, see +<>. + +If you want to invalidate a token immediately, use this delete token API. + + +==== Request Body + +The following parameters can be specified in the body of a DELETE request and +pertain to deleting a token: + +`token` (required):: +(string) An access token. + +==== Examples + +The following example invalidates the specified token immediately: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/oauth2/token +{ + "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" +} +-------------------------------------------------- +// NOTCONSOLE + +A successful call returns a JSON structure that indicates whether the token +has already been invalidated. + +[source,js] +-------------------------------------------------- +{ + "created" : true <1> +} +-------------------------------------------------- +// NOTCONSOLE + +<1> When a token has already been invalidated, `created` is set to false. diff --git a/x-pack/docs/en/rest-api/security/tokens.asciidoc b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc similarity index 62% rename from x-pack/docs/en/rest-api/security/tokens.asciidoc rename to x-pack/docs/en/rest-api/security/get-tokens.asciidoc index f991a5c0cb836..a2c4e6d7a37ec 100644 --- a/x-pack/docs/en/rest-api/security/tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc @@ -1,15 +1,12 @@ [role="xpack"] -[[security-api-tokens]] -=== Token Management APIs +[[security-api-get-token]] +=== Get token API -The `token` API enables you to create and invalidate bearer tokens for access -without requiring basic authentication. +Creates a bearer token for access without requiring basic authentication. ==== Request -`POST /_xpack/security/oauth2/token` + - -`DELETE /_xpack/security/oauth2/token` +`POST /_xpack/security/oauth2/token` ==== Description @@ -19,20 +16,20 @@ you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. -The Get Token API takes the same parameters as a typical OAuth 2.0 token API +The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. -A successful Get Token API call returns a JSON structure that contains the access +A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. -The tokens returned by the Get Token API have a finite period of time for which +The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. For more information, see <>. -If you want to invalidate a token immediately, you can do so by using the Delete -Token API. +If you want to invalidate a token immediately, you can do so by using the +<>. 
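+
+As a client-side illustration, the password grant and subsequent bearer
+authentication might look like the sketch below. It is illustrative only: it
+assumes the 6.x low-level Java REST client, the credentials are the test
+values used in the examples on this page, and the crude string scan stands in
+for a real JSON parser.
+
+[source,java]
+--------------------------------------------------
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+public class GetTokenSketch {
+    public static void main(String[] args) throws Exception {
+        String body;
+        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
+            Request tokenRequest = new Request("POST", "/_xpack/security/oauth2/token");
+            tokenRequest.setJsonEntity("{ \"grant_type\" : \"password\", "
+                    + "\"username\" : \"test_admin\", \"password\" : \"x-pack-test-password\" }");
+            Response response = client.performRequest(tokenRequest);
+            body = EntityUtils.toString(response.getEntity());
+        }
+        // Pull the access_token value out of the response; use a real JSON parser in practice.
+        int key = body.indexOf("\"access_token\"");
+        int start = body.indexOf('"', body.indexOf(':', key)) + 1;
+        String accessToken = body.substring(start, body.indexOf('"', start));
+        // Later requests authenticate with the bearer token instead of basic auth.
+        Header auth = new BasicHeader("Authorization", "Bearer " + accessToken);
+        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http"))
+                .setDefaultHeaders(new Header[] { auth }).build()) {
+            client.performRequest(new Request("GET", "/_cluster/health"));
+        }
+    }
+}
+--------------------------------------------------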
==== Request Body @@ -41,28 +38,28 @@ The following parameters can be specified in the body of a POST request and pertain to creating a token: `grant_type`:: -(string) The type of grant. Currently only the `password` grant type is supported. +(string) The type of grant. Valid grant types are: `password` and `refresh_token`. -`password` (required):: -(string) The user's password. +`password`:: +(string) The user's password. If you specify the `password` grant type, this +parameter is required. + +`refresh_token`:: +(string) If you specify the `refresh_token` grant type, this parameter is +required. It contains the string that was returned when you created the token +and enables you to extend its life. `scope`:: (string) The scope of the token. Currently tokens are only issued for a scope of `FULL` regardless of the value sent with the request. -`username` (required):: -(string) The username that identifies the user. - -The following parameters can be specified in the body of a DELETE request and -pertain to deleting a token: - -`token`:: -(string) An access token. +`username`:: +(string) The username that identifies the user. If you specify the `password` +grant type, this parameter is required. ==== Examples -[[security-api-get-token]] -To obtain a token, submit a POST request to the `/_xpack/security/oauth2/token` -endpoint. + +The following example obtains a token for the `test_admin` user: [source,js] -------------------------------------------------- @@ -101,8 +98,8 @@ curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvb // NOTCONSOLE [[security-api-refresh-token]] -To extend the life of an existing token, the token api may be called again with the refresh -token within 24 hours of the token's creation. +To extend the life of an existing token, you can call the API again with the +refresh token within 24 hours of the token's creation. For example: [source,js] -------------------------------------------------- @@ -116,7 +113,8 @@ POST /_xpack/security/oauth2/token // TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] // TEST[continued] -The API will return a new token and refresh token. Each refresh token may only be used one time. +The API will return a new token and refresh token. Each refresh token may only +be used one time. [source,js] -------------------------------------------------- @@ -128,32 +126,4 @@ The API will return a new token and refresh token. Each refresh token may only b } -------------------------------------------------- // TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] -// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] - -[[security-api-invalidate-token]] -If a token must be invalidated immediately, you can do so by submitting a DELETE -request to `/_xpack/security/oauth2/token`. For example: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/oauth2/token -{ - "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" -} --------------------------------------------------- -// CONSOLE -// TEST[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] -// TEST[continued] - -A successful call returns a JSON structure that indicates whether the token -has already been invalidated. 
- -[source,js] --------------------------------------------------- -{ - "created" : true <1> -} --------------------------------------------------- -// TESTRESPONSE - -<1> When a token has already been invalidated, `created` is set to false. +// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json index 8020d1ecd6d97..0b6f141d10e6a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json @@ -1,6 +1,6 @@ { "xpack.security.get_token": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-get-token", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html", "methods": [ "POST" ], "url": { "path": "/_xpack/security/oauth2/token", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json index be032c2ffd020..27dd103091422 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json @@ -1,6 +1,6 @@ { "xpack.security.invalidate_token": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-invalidate-token", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/oauth2/token", From 532d552ffd61751f18778596e32c3e0d9ca1bd80 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 17 Aug 2018 23:17:33 -0700 Subject: [PATCH 45/46] [DOCS] Splits the users API documentation into multiple pages (#32825) --- docs/reference/redirects.asciidoc | 11 + x-pack/docs/build.gradle | 13 + x-pack/docs/en/rest-api/security.asciidoc | 19 +- .../security/change-password.asciidoc | 21 +- .../rest-api/security/create-users.asciidoc | 107 +++++++++ .../rest-api/security/delete-users.asciidoc | 48 ++++ .../rest-api/security/disable-users.asciidoc | 43 ++++ .../rest-api/security/enable-users.asciidoc | 42 ++++ .../en/rest-api/security/get-users.asciidoc | 74 ++++++ .../docs/en/rest-api/security/users.asciidoc | 226 ------------------ .../api/xpack.security.delete_user.json | 2 +- .../api/xpack.security.disable_user.json | 2 +- .../api/xpack.security.enable_user.json | 2 +- .../api/xpack.security.get_user.json | 2 +- .../api/xpack.security.put_user.json | 2 +- 15 files changed, 375 insertions(+), 239 deletions(-) create mode 100644 x-pack/docs/en/rest-api/security/create-users.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/delete-users.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/disable-users.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/enable-users.asciidoc create mode 100644 x-pack/docs/en/rest-api/security/get-users.asciidoc delete mode 100644 x-pack/docs/en/rest-api/security/users.asciidoc diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 948652c37e69a..d67d8a733ac00 100644 --- a/docs/reference/redirects.asciidoc +++ 
b/docs/reference/redirects.asciidoc @@ -520,3 +520,14 @@ You can use the following APIs to create and invalidate bearer tokens for access without requiring basic authentication: * <>, <> + +[role="exclude",id="security-api-users"] +=== User Management APIs + +You can use the following APIs to create, read, update, and delete users from the +native realm: + +* <>, <> +* <>, <> +* <> +* <> diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 48ac4ba565e55..aab8435558198 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -736,3 +736,16 @@ setups['admin_role'] = ''' "metadata" : {"version": 1} } ''' +setups['jacknich_user'] = ''' + - do: + xpack.security.put_user: + username: "jacknich" + body: > + { + "password" : "test-password", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "metadata" : { "intelligence" : 7 } + } +''' diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 27e54df38b31c..f5b0c8eef667d 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -9,7 +9,6 @@ You can use the following APIs to perform {security} activities. * <> * <> * <> -* <> [float] [[security-role-apis]] @@ -30,16 +29,32 @@ without requiring basic authentication: * <>, <> +[float] +[[security-user-apis]] +=== Users + +You can use the following APIs to create, read, update, and delete users from the +native realm: + +* <>, <> +* <>, <> +* <> +* <> + include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/create-roles.asciidoc[] +include::security/create-users.asciidoc[] include::security/delete-roles.asciidoc[] include::security/delete-tokens.asciidoc[] +include::security/delete-users.asciidoc[] +include::security/disable-users.asciidoc[] +include::security/enable-users.asciidoc[] include::security/get-roles.asciidoc[] include::security/get-tokens.asciidoc[] +include::security/get-users.asciidoc[] include::security/privileges.asciidoc[] include::security/role-mapping.asciidoc[] include::security/ssl.asciidoc[] -include::security/users.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/change-password.asciidoc b/x-pack/docs/en/rest-api/security/change-password.asciidoc index 7dee98480e72c..6e6e8cf7375e4 100644 --- a/x-pack/docs/en/rest-api/security/change-password.asciidoc +++ b/x-pack/docs/en/rest-api/security/change-password.asciidoc @@ -1,9 +1,8 @@ [role="xpack"] [[security-api-change-password]] -=== Change Password API +=== Change passwords API -The Change Password API enables you to submit a request to change the password -of a user. +Changes the passwords of users in the native realm. ==== Request @@ -12,6 +11,15 @@ of a user. `POST _xpack/security/user//_password` +==== Description + +You can use the <> to update everything +but a user's `username` and `password`. This API changes a user's password. + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + + ==== Path Parameters `username`:: @@ -33,16 +41,17 @@ privilege can change passwords of other users. 
==== Examples -The following example updates the password for the `elastic` user: +The following example updates the password for the `jacknich` user: [source,js] -------------------------------------------------- -POST _xpack/security/user/elastic/_password +POST /_xpack/security/user/jacknich/_password { - "password": "x-pack-test-password" + "password" : "s3cr3t" } -------------------------------------------------- // CONSOLE +// TEST[setup:jacknich_user] A successful call returns an empty JSON structure. diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc new file mode 100644 index 0000000000000..5015d0401c223 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -0,0 +1,107 @@ +[role="xpack"] +[[security-api-put-user]] +=== Create users API + +Creates and updates users in the native realm. These users are commonly referred +to as _native users_. + + +==== Request + +`POST /_xpack/security/user/` + + +`PUT /_xpack/security/user/` + + +==== Description + +When updating a user, you can update everything but its `username` and `password`. +To change a user's password, use the +<>. + +For more information about the native realm, see +{stack-ov}/realms.html[Realms] and <>. + +==== Path Parameters + +`username` (required):: + (string) An identifier for the user. ++ +-- +[[username-validation]] +NOTE: Usernames must be at least 1 and no more than 1024 characters. They can +contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and +printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. Leading or trailing whitespace is not allowed. + +-- + + +==== Request Body + +The following parameters can be specified in the body of a POST or PUT request: + +`enabled`:: +(boolean) Specifies whether the user is enabled. The default value is `true`. + +`email`:: +(string) The email of the user. + +`full_name`:: +(string) The full name of the user. + +`metadata`:: +(object) Arbitrary metadata that you want to associate with the user. + +`password` (required):: +(string) The user's password. Passwords must be at least 6 characters long. + +`roles` (required):: +(list) A set of roles the user has. The roles determine the user's access +permissions. To create a user without any roles, specify an empty list: `[]`. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +The following example creates a user `jacknich`: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/user/jacknich +{ + "password" : "j@rV1s", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "metadata" : { + "intelligence" : 7 + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the user has been +created or updated. + +[source,js] +-------------------------------------------------- +{ + "user": { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing user is updated, `created` is set to false. + +After you add a user, requests from that user can be authenticated. 
For example:
+
+[source,shell]
+--------------------------------------------------
+curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health
+--------------------------------------------------
+// NOTCONSOLE
diff --git a/x-pack/docs/en/rest-api/security/delete-users.asciidoc b/x-pack/docs/en/rest-api/security/delete-users.asciidoc
new file mode 100644
index 0000000000000..63a66795617bd
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/delete-users.asciidoc
@@ -0,0 +1,48 @@
+[role="xpack"]
+[[security-api-delete-user]]
+=== Delete users API
+
+Deletes users from the native realm.
+
+==== Request
+
+`DELETE /_xpack/security/user/<username>`
+
+==== Description
+
+For more information about the native realm, see
+{stack-ov}/realms.html[Realms] and <>.
+
+==== Path Parameters
+
+`username` (required)::
+  (string) An identifier for the user.
+
+//==== Request Body
+
+==== Authorization
+
+To use this API, you must have at least the `manage_security` cluster privilege.
+
+
+==== Examples
+
+The following example deletes the user `jacknich`:
+
+[source,js]
+--------------------------------------------------
+DELETE /_xpack/security/user/jacknich
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:jacknich_user]
+
+If the user is successfully deleted, the request returns `{"found": true}`.
+Otherwise, `found` is set to `false`.
+
+[source,js]
+--------------------------------------------------
+{
+  "found" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/security/disable-users.asciidoc b/x-pack/docs/en/rest-api/security/disable-users.asciidoc
new file mode 100644
index 0000000000000..f5a6bc7e9a136
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/disable-users.asciidoc
@@ -0,0 +1,43 @@
+[role="xpack"]
+[[security-api-disable-user]]
+=== Disable users API
+
+Disables users in the native realm.
+
+
+==== Request
+
+`PUT /_xpack/security/user/<username>/_disable`
+
+
+==== Description
+
+By default, when you create users, they are enabled. You can use this API to
+revoke a user's access to {es}. To re-enable a user, use the
+<>.
+
+For more information about the native realm, see
+{stack-ov}/realms.html[Realms] and <>.
+
+==== Path Parameters
+
+`username` (required)::
+  (string) An identifier for the user.
+
+//==== Request Body
+
+==== Authorization
+
+To use this API, you must have at least the `manage_security` cluster privilege.
+
+
+==== Examples
+
+The following example disables the user `jacknich`:
+
+[source,js]
+--------------------------------------------------
+PUT /_xpack/security/user/jacknich/_disable
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:jacknich_user]
diff --git a/x-pack/docs/en/rest-api/security/enable-users.asciidoc b/x-pack/docs/en/rest-api/security/enable-users.asciidoc
new file mode 100644
index 0000000000000..cebaaffa7b28d
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/enable-users.asciidoc
@@ -0,0 +1,42 @@
+[role="xpack"]
+[[security-api-enable-user]]
+=== Enable users API
+
+Enables users in the native realm.
+
+
+==== Request
+
+`PUT /_xpack/security/user/<username>/_enable`
+
+
+==== Description
+
+By default, when you create users, they are enabled. You can use this API,
+together with the <>, to change that attribute.
+
+For more information about the native realm, see
+{stack-ov}/realms.html[Realms] and <>.
+
+==== Path Parameters
+
+`username` (required)::
+  (string) An identifier for the user.
+
+//==== Request Body
+
+==== Authorization
+
+To use this API, you must have at least the `manage_security` cluster privilege.
+
+
+==== Examples
+
+The following example enables the user `jacknich`:
+
+[source,js]
+--------------------------------------------------
+PUT /_xpack/security/user/jacknich/_enable
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:jacknich_user]
diff --git a/x-pack/docs/en/rest-api/security/get-users.asciidoc b/x-pack/docs/en/rest-api/security/get-users.asciidoc
new file mode 100644
index 0000000000000..2a20baacb0f52
--- /dev/null
+++ b/x-pack/docs/en/rest-api/security/get-users.asciidoc
@@ -0,0 +1,74 @@
+[role="xpack"]
+[[security-api-get-user]]
+=== Get users API
+
+Retrieves information about users in the native realm.
+
+
+==== Request
+
+`GET /_xpack/security/user` +
+
+`GET /_xpack/security/user/<username>`
+
+==== Description
+
+For more information about the native realm, see
+{stack-ov}/realms.html[Realms] and <>.
+
+==== Path Parameters
+
+`username`::
+  (string) An identifier for the user. You can specify multiple usernames as
+  a comma-separated list. If you omit this parameter, the API retrieves
+  information about all users.
+
+//==== Request Body
+
+==== Authorization
+
+To use this API, you must have at least the `manage_security` cluster privilege.
+
+
+==== Examples
+
+To retrieve a native user, submit a GET request to the
+`/_xpack/security/user/<username>` endpoint:
+
+[source,js]
+--------------------------------------------------
+GET /_xpack/security/user/jacknich
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:jacknich_user]
+
+A successful call returns a JSON structure, keyed by username, that contains
+the representation of each requested user. Note that user passwords are not
+included.
+
+[source,js]
+--------------------------------------------------
+{
+  "jacknich": {
+    "username": "jacknich",
+    "roles": [
+      "admin", "other_role1"
+    ],
+    "full_name": "Jack Nicholson",
+    "email": "jacknich@example.com",
+    "metadata": { "intelligence" : 7 },
+    "enabled": true
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+If the user is not defined in the `native` realm, the request returns a 404
+status code.
+
+Omit the username to retrieve all users:
+
+[source,js]
+--------------------------------------------------
+GET /_xpack/security/user
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
diff --git a/x-pack/docs/en/rest-api/security/users.asciidoc b/x-pack/docs/en/rest-api/security/users.asciidoc
deleted file mode 100644
index c84da5c7d75ff..0000000000000
--- a/x-pack/docs/en/rest-api/security/users.asciidoc
+++ /dev/null
@@ -1,226 +0,0 @@
-[role="xpack"]
-[[security-api-users]]
-=== User Management APIs
-
-The `user` API enables you to create, read, update, and delete users from the
-`native` realm. These users are commonly referred to as *native users*.
-
-
-==== Request
-
-`GET /_xpack/security/user` +
-
-`GET /_xpack/security/user/<username>` +
-
-`DELETE /_xpack/security/user/<username>` +
-
-`POST /_xpack/security/user/<username>` +
-
-`PUT /_xpack/security/user/<username>` +
-
-`PUT /_xpack/security/user/<username>/_disable` +
-
-`PUT /_xpack/security/user/<username>/_enable` +
-
-`PUT /_xpack/security/user/<username>/_password`
-
-
-==== Description
-
-You can use the PUT user API to create or update users. When updating a user,
-you can update everything but its `username` and `password`. To change a user's
-password, use the <>.
-
-[[username-validation]]
-NOTE: Usernames must be at least 1 and no more than 1024 characters.
They can -contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and -printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. -Leading or trailing whitespace is not allowed. - -==== Path Parameters - -`username`:: - (string) An identifier for the user. If you omit this parameter from a Get - User API request, it retrieves information about all users. - - -==== Request Body - -The following parameters can be specified in the body of a POST or PUT request -and pertain to creating a user: - -`enabled`:: -(boolean) Specifies whether the user is enabled. The default value is `true`. - -`email`:: -(string) The email of the user. - -`full_name`:: -(string) The full name of the user. - -`metadata`:: -(object) Arbitrary metadata that you want to associate with the user. - -`password` (required):: -(string) The user's password. Passwords must be at least 6 characters long. - -`roles` (required):: -(list) A set of roles the user has. The roles determine the user's access -permissions. To create a user without any roles, specify an empty list: `[]`. - -==== Authorization - -To use this API, you must have at least the `manage_security` cluster privilege. - - -==== Examples - -[[security-api-put-user]] -To add a user, submit a PUT or POST request to the `/_xpack/security/user/` -endpoint. - -[source,js] --------------------------------------------------- -POST /_xpack/security/user/jacknich -{ - "password" : "j@rV1s", - "roles" : [ "admin", "other_role1" ], - "full_name" : "Jack Nicholson", - "email" : "jacknich@example.com", - "metadata" : { - "intelligence" : 7 - } -} --------------------------------------------------- -// CONSOLE - -A successful call returns a JSON structure that shows whether the user has been -created or updated. - -[source,js] --------------------------------------------------- -{ - "user": { - "created" : true <1> - } -} --------------------------------------------------- -// TESTRESPONSE -<1> When an existing user is updated, `created` is set to false. - -After you add a user through the Users API, requests from that user can be -authenticated. For example: - -[source,shell] --------------------------------------------------- -curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health --------------------------------------------------- -// NOTCONSOLE - -[[security-api-get-user]] -To retrieve a native user, submit a GET request to the `/_xpack/security/user/` -endpoint: - -[source,js] --------------------------------------------------- -GET /_xpack/security/user/jacknich --------------------------------------------------- -// CONSOLE -// TEST[continued] - -A successful call returns an array of users with the JSON representation of the -user. Note that user passwords are not included. - -[source,js] --------------------------------------------------- -{ - "jacknich": { <1> - "username" : "jacknich", - "roles" : [ "admin", "other_role1" ], - "full_name" : "Jack Nicholson", - "email" : "jacknich@example.com", - "enabled": true, - "metadata" : { - "intelligence" : 7 - } - } -} --------------------------------------------------- -// TESTRESPONSE -<1> If the user is not defined in the `native` realm, the request 404s. 
- -You can specify multiple usernames as a comma-separated list: - -[source,js] --------------------------------------------------- -GET /_xpack/security/user/jacknich,rdinero --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Omit the username to retrieve all users: - -[source,js] --------------------------------------------------- -GET /_xpack/security/user --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-reset-user-password]] -To reset the password for a user, submit a PUT request to the -`/_xpack/security/user//_password` endpoint: - -[source,js] --------------------------------------------------- -PUT /_xpack/security/user/jacknich/_password -{ - "password" : "s3cr3t" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-disable-user]] -To disable a user, submit a PUT request to the -`/_xpack/security/user//_disable` endpoint: - -[source,js] --------------------------------------------------- -PUT /_xpack/security/user/jacknich/_disable --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-enable-user]] -To enable a user, submit a PUT request to the -`/_xpack/security/user//_enable` endpoint: - -[source,js] --------------------------------------------------- -PUT /_xpack/security/user/jacknich/_enable --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[security-api-delete-user]] -To delete a user, submit a DELETE request to the `/_xpack/security/user/` -endpoint: - -[source,js] --------------------------------------------------- -DELETE /_xpack/security/user/jacknich --------------------------------------------------- -// CONSOLE -// TEST[continued] - -If the user is successfully deleted, the request returns `{"found": true}`. -Otherwise, `found` is set to false. 
- -[source,js] --------------------------------------------------- -{ - "found" : true -} --------------------------------------------------- -// TESTRESPONSE diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json index d72c854a69dcb..fa1deb3e1ec13 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json @@ -1,6 +1,6 @@ { "xpack.security.delete_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-delete-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html", "methods": [ "DELETE" ], "url": { "path": "/_xpack/security/user/{username}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json index 3a72b3141911f..0e55e82ead628 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json @@ -1,6 +1,6 @@ { "xpack.security.disable_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-disable-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/user/{username}/_disable", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json index c68144957f07d..da2f67adbea37 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json @@ -1,6 +1,6 @@ { "xpack.security.enable_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-enable-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html", "methods": [ "PUT", "POST" ], "url": { "path": "/_xpack/security/user/{username}/_enable", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json index 910fb7d064582..94dcbca81e18e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json @@ -1,6 +1,6 @@ { "xpack.security.get_user": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-get-user", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html", "methods": [ "GET" ], "url": { "path": "/_xpack/security/user/{username}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json index de07498a40954..1b51783a05ef5 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json
@@ -1,6 +1,6 @@
 {
   "xpack.security.put_user": {
-    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-put-user",
+    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html",
    "methods": [ "PUT", "POST" ],
    "url": {
      "path": "/_xpack/security/user/{username}",

From f82bb64feba3d3420dd790d02cb7dd5ee612df4d Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Sat, 18 Aug 2018 08:46:44 +0200
Subject: [PATCH 46/46] NETWORKING: Make RemoteClusterConn. Lazy Resolve DNS
 (#32764)

* Lazily resolve DNS (i.e. `String` to `DiscoveryNode`) so we do not run into
  issues with lookups that are cached indefinitely (provided the JVM DNS cache
  is configured correctly, as explained in
  https://www.elastic.co/guide/en/elasticsearch/reference/6.3/networkaddress-cache-ttl.html)
* Changed the `InetAddress` type to `String` higher up the stack accordingly
* Passed down `Supplier<DiscoveryNode>` instead of an outright `DiscoveryNode`
  from `RemoteClusterAware#buildRemoteClustersSeeds` onwards, to lazily resolve
  DNS only when the `DiscoveryNode` is actually used (we could also have passed
  down the value of `clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting)`
  together with the `List<String>` of hosts, but this route introduced less
  duplication and resulted in a significantly smaller changeset)
* Closes #28858
---
 .../transport/RemoteClusterAware.java         |  66 +++++++----
 .../transport/RemoteClusterConnection.java    |  17 +--
 .../transport/RemoteClusterService.java       |  18 +--
 .../RemoteClusterConnectionTests.java         | 104 ++++++++++++------
 .../transport/RemoteClusterServiceTests.java  |  47 ++++----
 .../authz/IndicesAndAliasesResolver.java      |   3 +-
 6 files changed, 153 insertions(+), 102 deletions(-)
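The core idea is easiest to see in isolation. The following is a minimal,
self-contained sketch of the pattern this patch applies; it uses plain JDK
types rather than the actual `DiscoveryNode`, `Setting`, and `TransportAddress`
classes, and the class and method names are illustrative only. Seeds stay
configured as `host:port` strings, and the DNS lookup is wrapped in a
`Supplier` so it runs each time the address is needed rather than once when
the settings are parsed.

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;
import java.util.stream.Collectors;

public final class LazySeedResolutionSketch {

    /**
     * Wraps each configured "host:port" string in a Supplier. No DNS lookup
     * happens here; resolution is deferred until get() is called.
     */
    public static List<Supplier<InetSocketAddress>> buildSeeds(List<String> hosts) {
        return hosts.stream()
                .<Supplier<InetSocketAddress>>map(host -> () -> resolve(host))
                .collect(Collectors.toList());
    }

    /** Parses and resolves "host:port", including bracketed IPv6 such as [::1]:9300. */
    static InetSocketAddress resolve(String remoteHost) {
        int portSeparator = remoteHost.lastIndexOf(':');
        if (portSeparator < 1 || portSeparator == remoteHost.length() - 1) {
            throw new IllegalArgumentException(
                    "remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
        }
        String host = remoteHost.substring(0, portSeparator);
        int port = Integer.parseInt(remoteHost.substring(portSeparator + 1));
        try {
            // the lookup runs on every call, so an updated DNS record is picked up
            // (subject to the JVM's networkaddress.cache.ttl configuration)
            return new InetSocketAddress(InetAddress.getByName(host), port);
        } catch (UnknownHostException e) {
            throw new IllegalArgumentException("unknown host [" + host + "]", e);
        }
    }

    public static void main(String[] args) {
        List<Supplier<InetSocketAddress>> seeds = buildSeeds(Arrays.asList("localhost:9300"));
        // each get() performs a fresh resolution
        System.out.println(seeds.get(0).get());
        System.out.println(seeds.get(0).get());
    }
}

On the real code path, `RemoteClusterAware#buildRemoteClustersSeeds` plays the
role of `buildSeeds` and produces `Supplier<DiscoveryNode>` instances, as the
diffs below show.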
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java
index 107a4b32d89f9..a12f27c93e3c4 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.transport;
 
+import java.util.function.Supplier;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -48,9 +49,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
     /**
      * A list of initial seed nodes to discover eligible nodes from the remote cluster
      */
-    public static final Setting.AffixSetting<List<InetSocketAddress>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.",
-        "seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress,
-            Setting.Property.NodeScope, Setting.Property.Dynamic));
+    public static final Setting.AffixSetting<List<String>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting(
+        "search.remote.",
+        "seeds",
+        key -> Setting.listSetting(
+            key, Collections.emptyList(),
+            s -> {
+                // validate seed address
+                parsePort(s);
+                return s;
+            },
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic
+        )
+    );
     public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
     public static final String LOCAL_CLUSTER_GROUP_KEY = "";
 
@@ -65,18 +77,20 @@ protected RemoteClusterAware(Settings settings) {
         this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
     }
 
-    protected static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
-        Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
+    protected static Map<String, List<Supplier<DiscoveryNode>>> buildRemoteClustersSeeds(Settings settings) {
+        Stream<Setting<List<String>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
         return allConcreteSettings.collect(
             Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
                 String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
-                List<DiscoveryNode> nodes = new ArrayList<>();
-                for (InetSocketAddress address : concreteSetting.get(settings)) {
-                    TransportAddress transportAddress = new TransportAddress(address);
-                    DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
-                        transportAddress,
-                        Version.CURRENT.minimumCompatibilityVersion());
-                    nodes.add(node);
+                List<String> addresses = concreteSetting.get(settings);
+                List<Supplier<DiscoveryNode>> nodes = new ArrayList<>(addresses.size());
+                for (String address : addresses) {
+                    nodes.add(() -> {
+                        TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
+                        return new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
+                            transportAddress,
+                            Version.CURRENT.minimumCompatibilityVersion());
+                    });
                 }
                 return nodes;
             }));
@@ -128,7 +142,7 @@ public Map<String, List<String>> groupClusterIndices(String[] requestIndices, Pr
      * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is
      * empty the cluster alias is unregistered and should be removed.
      */
-    protected abstract void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses);
+    protected abstract void updateRemoteCluster(String clusterAlias, List<String> addresses);
 
     /**
      * Registers this instance to listen to updates on the cluster settings.
@@ -138,27 +152,35 @@ public void listenForUpdates(ClusterSettings clusterSettings) {
             (namespace, value) -> {});
     }
 
-    private static InetSocketAddress parseSeedAddress(String remoteHost) {
-        int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
-        if (portSeparator == -1 || portSeparator == remoteHost.length()) {
-            throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
-        }
-        String host = remoteHost.substring(0, portSeparator);
+    protected static InetSocketAddress parseSeedAddress(String remoteHost) {
+        String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost));
         InetAddress hostAddress;
         try {
             hostAddress = InetAddress.getByName(host);
         } catch (UnknownHostException e) {
             throw new IllegalArgumentException("unknown host [" + host + "]", e);
         }
+        return new InetSocketAddress(hostAddress, parsePort(remoteHost));
+    }
+
+    private static int parsePort(String remoteHost) {
         try {
-            int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
+            int port = Integer.valueOf(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1));
             if (port <= 0) {
                 throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
             }
-            return new InetSocketAddress(hostAddress, port);
+            return port;
         } catch (NumberFormatException e) {
-            throw new IllegalArgumentException("port must be a number", e);
+            throw new IllegalArgumentException("failed to parse port", e);
        }
    }
+
+    private static int indexOfPortSeparator(String remoteHost) {
+        int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie.
[::1]:9300 + if (portSeparator == -1 || portSeparator == remoteHost.length()) { + throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); } + return portSeparator; } public static String buildRemoteIndexName(String clusterAlias, String indexName) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 67c0e1a5aa64a..15cf7899dc03f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; @@ -84,7 +85,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; - private volatile List seedNodes; + private volatile List> seedNodes; private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private SetOnce remoteClusterName = new SetOnce<>(); @@ -99,7 +100,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to */ - RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, + RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { super(settings); this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -127,7 +128,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo /** * Updates the list of seed nodes for this cluster connection */ - synchronized void updateSeedNodes(List seedNodes, ActionListener connectListener) { + synchronized void updateSeedNodes(List> seedNodes, ActionListener connectListener) { this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); connectHandler.connect(connectListener); } @@ -456,7 +457,7 @@ protected void doRun() { }); } - void collectRemoteNodes(Iterator seedNodes, + private void collectRemoteNodes(Iterator> seedNodes, final TransportService transportService, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); @@ -464,7 +465,7 @@ void collectRemoteNodes(Iterator seedNodes, try { if (seedNodes.hasNext()) { cancellableThreads.executeIO(() -> { - final DiscoveryNode seedNode = seedNodes.next(); + final DiscoveryNode seedNode = seedNodes.next().get(); final TransportService.HandshakeResponse handshakeResponse; Transport.Connection connection = transportService.openConnection(seedNode, ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); @@ -554,11 +555,11 @@ private class SniffClusterStateResponseHandler implements TransportResponseHandl private final TransportService transportService; private final Transport.Connection connection; private final ActionListener listener; - private final Iterator seedNodes; + 
private final Iterator> seedNodes; private final CancellableThreads cancellableThreads; SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, - ActionListener listener, Iterator seedNodes, + ActionListener listener, Iterator> seedNodes, CancellableThreads cancellableThreads) { this.transportService = transportService; this.connection = connection; @@ -651,7 +652,7 @@ void addConnectedNode(DiscoveryNode node) { * Get the information about remote nodes to be rendered on {@code _remote/info} requests. */ public RemoteConnectionInfo getConnectionInfo() { - List seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()); + List seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect(Collectors.toList()); TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(), initialConnectionTimeout, skipUnavailable); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index a07de63d53734..956a0d94179e0 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -40,7 +41,6 @@ import java.io.Closeable; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -115,7 +115,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes * @param connectionListener a listener invoked once every configured cluster has been connected to */ - private synchronized void updateRemoteClusters(Map> seeds, ActionListener connectionListener) { + private synchronized void updateRemoteClusters(Map>> seeds, + ActionListener connectionListener) { if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) { throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); } @@ -125,7 +126,7 @@ private synchronized void updateRemoteClusters(Map> } else { CountDown countDown = new CountDown(seeds.size()); remoteClusters.putAll(this.remoteClusters); - for (Map.Entry> entry : seeds.entrySet()) { + for (Map.Entry>> entry : seeds.entrySet()) { RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection try { @@ -310,16 +311,17 @@ synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavail } } - protected void updateRemoteCluster(String clusterAlias, List addresses) { + @Override + protected void updateRemoteCluster(String clusterAlias, List addresses) { updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); } void updateRemoteCluster( final String clusterAlias, - final List addresses, + final List addresses, final ActionListener connectionListener) { - final List nodes = addresses.stream().map(address -> { - final TransportAddress 
transportAddress = new TransportAddress(address); + final List> nodes = addresses.stream().>map(address -> () -> { + final TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); final String id = clusterAlias + "#" + transportAddress.toString(); final Version version = Version.CURRENT.minimumCompatibilityVersion(); return new DiscoveryNode(id, transportAddress, version); @@ -334,7 +336,7 @@ void updateRemoteCluster( void initializeRemoteClusters() { final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); final PlainActionFuture future = new PlainActionFuture<>(); - Map> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); + Map>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); updateRemoteClusters(seeds, future); try { future.get(timeValue.millis(), TimeUnit.MILLISECONDS); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 8da4064d1c89a..3d0388ccfad96 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -158,8 +159,8 @@ public void testLocalProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -198,8 +199,8 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -254,8 +255,8 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -276,7 +277,7 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() 
throws Exception { knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(incompatibleTransport.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(incompatibleSeedNode, seedNode); + List> seedNodes = Arrays.asList(() -> incompatibleSeedNode, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -310,8 +311,8 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertFalse(service.nodeConnected(spareNode)); @@ -359,8 +360,8 @@ public void testFilterDiscoveredNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); if (rejectedNode.equals(seedNode)) { assertFalse(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -374,7 +375,7 @@ public void testFilterDiscoveredNodes() throws Exception { } } - private void updateSeedNodes(RemoteClusterConnection connection, List seedNodes) throws Exception { + private void updateSeedNodes(RemoteClusterConnection connection, List> seedNodes) throws Exception { CountDownLatch latch = new CountDownLatch(1); AtomicReference exceptionAtomicReference = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(x -> latch.countDown(), x -> { @@ -398,8 +399,8 @@ public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(seedNode))); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode))); assertFalse(service.nodeConnected(seedNode)); assertTrue(connection.assertNoRunningConnections()); } @@ -461,7 +462,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(seedNode); for (DiscoveryNode node : knownNodes) { final Transport.Connection transportConnection = connection.getConnection(node); @@ -504,7 +505,7 @@ public void run() { CountDownLatch listenerCalled = new 
CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -512,7 +513,7 @@ public void run() { exceptionReference.set(x); listenerCalled.countDown(); }); - connection.updateSeedNodes(Arrays.asList(seedNode), listener); + connection.updateSeedNodes(Arrays.asList(() -> seedNode), listener); acceptedLatch.await(); connection.close(); // now close it, this should trigger an interrupt on the socket and we can move on assertTrue(connection.assertNoRunningConnections()); @@ -539,7 +540,7 @@ public void testFetchShards() throws Exception { try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); - List nodes = Collections.singletonList(seedNode); + List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", nodes, service, Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { @@ -579,7 +580,7 @@ public void testFetchShardsThreadContextHeader() throws Exception { try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); - List nodes = Collections.singletonList(seedNode); + List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", nodes, service, Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); @@ -635,7 +636,7 @@ public void testFetchShardsSkipUnavailable() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") @@ -738,7 +739,7 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(seedTransport1.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(seedNode1, seedNode); + List> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -816,7 +817,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(seedTransport1.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(seedNode1, seedNode); + List> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = 
MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -904,7 +905,7 @@ public void testGetConnectionInfo() throws Exception { knownNodes.add(transport3.getLocalDiscoNode()); knownNodes.add(transport2.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(node3, node1, node2); + List> seedNodes = Arrays.asList(() -> node3, () -> node1, () -> node2); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -1059,7 +1060,7 @@ public void testEnsureConnected() throws IOException, InterruptedException { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { assertFalse(service.nodeConnected(seedNode)); assertFalse(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -1108,9 +1109,9 @@ public void testCollectNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); } CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference> reference = new AtomicReference<>(); @@ -1142,14 +1143,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted List discoverableTransports = new CopyOnWriteArrayList<>(); try { final int numDiscoverableNodes = randomIntBetween(5, 20); - List discoverableNodes = new ArrayList<>(numDiscoverableNodes); - for (int i = 0; i < numDiscoverableNodes; i++) { + List> discoverableNodes = new ArrayList<>(numDiscoverableNodes); + for (int i = 0; i < numDiscoverableNodes; i++ ) { MockTransportService transportService = startTransport("discoverable_node" + i, knownNodes, Version.CURRENT); - discoverableNodes.add(transportService.getLocalDiscoNode()); + discoverableNodes.add(transportService::getLocalDiscoNode); discoverableTransports.add(transportService); } - List seedNodes = randomSubsetOf(discoverableNodes); + List> seedNodes = randomSubsetOf(discoverableNodes); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -1198,7 +1199,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted discoverableTransports.add(transportService); connection.addConnectedNode(transportService.getLocalDiscoNode()); } else { - DiscoveryNode node = randomFrom(discoverableNodes); + DiscoveryNode node = randomFrom(discoverableNodes).get(); connection.onNodeDisconnected(node); } } @@ -1246,12 +1247,13 @@ public void testClusterNameIsChecked() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + 
Arrays.asList( () -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); - List discoveryNodes = Arrays.asList(otherClusterTransport.getLocalDiscoNode(), seedNode); + List> discoveryNodes = + Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode(), () -> seedNode); Collections.shuffle(discoveryNodes, random()); updateSeedNodes(connection, discoveryNodes); assertTrue(service.nodeConnected(seedNode)); @@ -1262,7 +1264,7 @@ public void testClusterNameIsChecked() throws Exception { assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> - updateSeedNodes(connection, Arrays.asList(otherClusterTransport.getLocalDiscoNode()))); + updateSeedNodes(connection, Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode()))); assertThat(illegalStateException.getMessage(), startsWith("handshake failed, mismatched cluster name [Cluster [otherCluster]]" + " - {other_cluster_discoverable_node}")); @@ -1325,7 +1327,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(connectedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> connectedNode), service, Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(connectedNode); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected @@ -1348,4 +1350,34 @@ public void close() { } } } + + public void testLazyResolveTransportAddress() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + CountDownLatch multipleResolveLatch = new CountDownLatch(2); + Supplier seedSupplier = () -> { + multipleResolveLatch.countDown(); + return seedNode; + }; + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(seedSupplier)); + // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes + // being called again so we try to resolve the same seed node's host twice + discoverableTransport.close(); + seedTransport.close(); + assertTrue(multipleResolveLatch.await(30L, TimeUnit.SECONDS)); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 03d76b5a953c6..c94b1cbdef547 100644 --- 
a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.transport;
 
+import java.util.function.Supplier;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
@@ -103,10 +104,19 @@ public void testRemoteClusterSeedSetting() {
         Settings brokenSettings = Settings.builder()
                 .put("search.remote.foo.seeds", "192.168.0.1").build();
         expectThrows(IllegalArgumentException.class, () ->
                 RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));
+
+        Settings brokenPortSettings = Settings.builder()
+                .put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build();
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenPortSettings)
+                .forEach(setting -> setting.get(brokenPortSettings))
+        );
+        assertEquals("failed to parse port", e.getMessage());
     }
 
     public void testBuiltRemoteClustersSeeds() throws Exception {
-        Map<String, List<DiscoveryNode>> map = RemoteClusterService.buildRemoteClustersSeeds(
+        Map<String, List<Supplier<DiscoveryNode>>> map = RemoteClusterService.buildRemoteClustersSeeds(
             Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080").put("search.remote.bar.seeds", "[::1]:9090").build());
         assertEquals(2, map.size());
         assertTrue(map.containsKey("foo"));
         assertTrue(map.containsKey("bar"));
         assertEquals(1, map.get("foo").size());
         assertEquals(1, map.get("bar").size());
 
-        DiscoveryNode foo = map.get("foo").get(0);
+        DiscoveryNode foo = map.get("foo").get(0).get();
 
         assertEquals(foo.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 8080)));
         assertEquals(foo.getId(), "foo#192.168.0.1:8080");
         assertEquals(foo.getVersion(), Version.CURRENT.minimumCompatibilityVersion());
 
-        DiscoveryNode bar = map.get("bar").get(0);
+        DiscoveryNode bar = map.get("bar").get(0).get();
         assertEquals(bar.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("[::1]"), 9090)));
         assertEquals(bar.getId(), "bar#[::1]:9090");
         assertEquals(bar.getVersion(), Version.CURRENT.minimumCompatibilityVersion());
@@ -194,10 +204,10 @@ public void testIncrementallyAddClusters() throws IOException {
             assertFalse(service.isCrossClusterSearchEnabled());
             service.initializeRemoteClusters();
             assertFalse(service.isCrossClusterSearchEnabled());
-            service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().address()));
+            service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()));
             assertTrue(service.isCrossClusterSearchEnabled());
             assertTrue(service.isRemoteClusterRegistered("cluster_1"));
-            service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().address()));
+            service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()));
             assertTrue(service.isCrossClusterSearchEnabled());
             assertTrue(service.isRemoteClusterRegistered("cluster_1"));
             assertTrue(service.isRemoteClusterRegistered("cluster_2"));
@@ -252,22 +262,17 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException {
             service.initializeRemoteClusters();
             assertFalse(service.isCrossClusterSearchEnabled());
 
-            final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
-            final InetSocketAddress
c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), connectionListener(secondLatch)); secondLatch.await(); @@ -321,22 +326,17 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), connectionListener(secondLatch)); secondLatch.await(); @@ -398,22 +398,17 @@ public void testCollectNodes() throws InterruptedException, IOException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), connectionListener(secondLatch)); secondLatch.await(); CountDownLatch latch = new CountDownLatch(1); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 2247cbe02a812..c388fd5627c32 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -30,7 +30,6 @@ import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import 
org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -429,7 +428,7 @@ protected Set getRemoteClusterNames() { } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses) { + protected void updateRemoteCluster(String clusterAlias, List addresses) { if (addresses.isEmpty()) { clusters.remove(clusterAlias); } else {