From 7b6d0faec268c6246ddea4a2916552752dcb75b9 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 16 May 2018 15:07:14 -0400 Subject: [PATCH 01/25] SQL: Remove dependency for server's version from JDBC driver (#30631) Removes dependency for server's version from the JDBC driver code. This should allow us to dramatically reduce driver's size by removing the server dependency from the driver. Relates #29856 --- .../xpack/sql/jdbc/net/client/JdbcHttpClient.java | 4 +++- .../xpack/sql/cli/command/CliSession.java | 3 ++- .../xpack/sql/cli/command/ServerInfoCliCommand.java | 2 +- .../elasticsearch/xpack/sql/cli/CliSessionTests.java | 4 ++-- .../sql/cli/command/ServerInfoCliCommandTests.java | 2 +- .../elasticsearch/xpack/sql/proto/MainResponse.java | 10 ++++------ 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java index 89ee78e0bae9e..17afc34efffe6 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.shared.Version; import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; @@ -79,7 +80,8 @@ public InfoResponse serverInfo() throws SQLException { private InfoResponse fetchServerInfo() throws SQLException { MainResponse mainResponse = httpClient.serverInfo(); - return new InfoResponse(mainResponse.getClusterName(), mainResponse.getVersion().major, mainResponse.getVersion().minor); + Version version = Version.fromString(mainResponse.getVersion()); + return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor); } /** diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java index 8e030f36dd042..fc89e3939cc35 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java @@ -65,8 +65,9 @@ public void checkConnection() throws ClientException { } catch (SQLException ex) { throw new ClientException(ex); } + Version version = Version.fromString(response.getVersion()); // TODO: We can relax compatibility requirement later when we have a better idea about protocol compatibility guarantees - if (response.getVersion().major != Version.CURRENT.major || response.getVersion().minor != Version.CURRENT.minor) { + if (version.major != Version.CURRENT.major || version.minor != Version.CURRENT.minor) { throw new ClientException("This alpha version of CLI is only compatible with Elasticsearch version " + Version.CURRENT.toString()); } diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java index 
e637386f9798f..9e7b75102ec6f 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java @@ -31,7 +31,7 @@ public boolean doHandle(CliTerminal terminal, CliSession cliSession, String line terminal.line() .text("Node:").em(info.getNodeName()) .text(" Cluster:").em(info.getClusterName()) - .text(" Version:").em(info.getVersion().toString()) + .text(" Version:").em(info.getVersion()) .ln(); return true; } diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java index e5643ad443a59..265051a5a58df 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -27,7 +27,7 @@ public class CliSessionTests extends ESTestCase { public void testProperConnection() throws Exception { HttpClient httpClient = mock(HttpClient.class); - when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT, + when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT.toString(), ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); cliSession.checkConnection(); @@ -57,7 +57,7 @@ public void testWrongServerVersion() throws Exception { } when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), - org.elasticsearch.Version.fromString(major + "." + minor + ".23"), + org.elasticsearch.Version.fromString(major + "." 
+ minor + ".23").toString(), ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); expectThrows(ClientException.class, cliSession::checkConnection); diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java index e99cb2fb7f7e2..6c9d4933a9912 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -35,7 +35,7 @@ public void testShowInfo() throws Exception { TestTerminal testTerminal = new TestTerminal(); HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); - when(client.serverInfo()).thenReturn(new MainResponse("my_node", org.elasticsearch.Version.fromString("1.2.3"), + when(client.serverInfo()).thenReturn(new MainResponse("my_node", "1.2.3", new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT)); ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java index 73b6cbc529ec6..c8bb0c51f7fe7 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.proto; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; @@ -19,8 +18,7 @@ */ public class MainResponse { private String nodeName; - // TODO: Add parser for Version - private Version version; + private String version; private String clusterName; private String clusterUuid; // TODO: Add parser for Build @@ -29,7 +27,7 @@ public class MainResponse { private MainResponse() { } - public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, Build build) { + public MainResponse(String nodeName, String version, String clusterName, String clusterUuid, Build build) { this.nodeName = nodeName; this.version = version; this.clusterName = clusterName; @@ -41,7 +39,7 @@ public String getNodeName() { return nodeName; } - public Version getVersion() { + public String getVersion() { return version; } @@ -76,7 +74,7 @@ public Build getBuild() { (String) value.get("build_hash"), (String) value.get("build_date"), (boolean) value.get("build_snapshot")); - response.version = Version.fromString((String) value.get("number")); + response.version = (String) value.get("number"); }, (parser, context) -> parser.map(), new ParseField("version")); } From 4f41018753664296856a47b25d9faf2d29142a39 Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 14 May 2018 16:45:09 -0700 Subject: [PATCH 02/25] [DOCS] Reorganizes RBAC documentation --- .../authorization/built-in-roles.asciidoc | 114 +++++++ .../authorization/managing-roles.asciidoc | 175 +++++++++++ .../security/authorization/overview.asciidoc | 290 +----------------- 
.../privileges.asciidoc | 9 +- x-pack/docs/en/security/reference.asciidoc | 2 - 5 files changed, 297 insertions(+), 293 deletions(-) create mode 100644 x-pack/docs/en/security/authorization/built-in-roles.asciidoc create mode 100644 x-pack/docs/en/security/authorization/managing-roles.asciidoc rename x-pack/docs/en/security/{reference => authorization}/privileges.asciidoc (97%) diff --git a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc new file mode 100644 index 0000000000000..f336393d81d3d --- /dev/null +++ b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc @@ -0,0 +1,114 @@ +[role="xpack"] +[[built-in-roles]] +=== Built-in roles + +{security} applies a default role to all users, including +<>. The default role enables users to access +the authenticate endpoint, change their own passwords, and get information about +themselves. + +{security} also provides a set of built-in roles you can explicitly assign +to users. These roles have a fixed set of privileges and cannot be updated. + +[[built-in-roles-ingest-user]] `ingest_admin` :: +Grants access to manage *all* index templates and *all* ingest pipeline configurations. ++ +NOTE: This role does *not* provide the ability to create indices; those privileges +must be defined in a separate role. + +[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` :: +Grants access to the {kib} Dashboard and read-only permissions on the `.kibana` +index. This role does not have access to editing tools in {kib}. For more +information, see +{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. + +[[built-in-roles-kibana-system]] `kibana_system` :: +Grants access necessary for the {kib} system user to read from and write to the +{kib} indices, manage index templates, and check the availability of the {es} cluster. +This role grants read access to the `.monitoring-*` indices and read and write access +to the `.reporting-*` indices. For more information, see +{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. + +[[built-in-roles-kibana-user]] `kibana_user`:: +Grants the minimum privileges required for any user of {kib}. This role grants +access to the {kib} indices and grants monitoring privileges for the cluster. + +[[built-in-roles-logstash-admin]] `logstash_admin` :: +Grants access to the `.logstash*` indices for managing configurations. + +[[built-in-roles-logstash-system]] `logstash_system` :: +Grants access necessary for the Logstash system user to send system-level data +(such as monitoring) to {es}. For more information, see +{logstash-ref}/ls-security.html[Configuring Security in Logstash]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. ++ +NOTE: This role does not provide access to the logstash indices and is not +suitable for use within a Logstash pipeline. + +[[built-in-roles-beats-system]] `beats_system` :: +Grants access necessary for the Beats system user to send system-level data +(such as monitoring) to {es}. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. ++ +NOTE: This role does not provide access to the beats indices and is not +suitable for writing beats output to {es}. 
+ +[[built-in-roles-ml-admin]] `machine_learning_admin`:: +Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices. + +[[built-in-roles-ml-user]] `machine_learning_user`:: +Grants the minimum privileges required to view {xpackml} configuration, +status, and results. This role grants `monitor_ml` cluster privileges and +read access to the `.ml-notifications` and `.ml-anomalies*` indices, +which store {ml} results. + +[[built-in-roles-monitoring-user]] `monitoring_user`:: +Grants the minimum privileges required for any user of {monitoring} other than those +required to use {kib}. This role grants access to the monitoring indices and grants +privileges necessary for reading basic cluster information. Monitoring users should +also be assigned the `kibana_user` role. + +[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`:: +Grants the minimum privileges required for a remote monitoring agent to write data +into this cluster. + +[[built-in-roles-reporting-user]] `reporting_user`:: +Grants the specific privileges required for users of {reporting} other than those +required to use {kib}. This role grants access to the reporting indices. Reporting +users should also be assigned the `kibana_user` role and a role that grants them +access to the data that is used to generate reports. + +[[built-in-roles-superuser]] `superuser`:: +Grants full access to the cluster, including all indices and data. A user with +the `superuser` role can also manage users and roles and +<> any other user in the system. Due to the +permissive nature of this role, take extra care when assigning it to a user. + +[[built-in-roles-transport-client]] `transport_client`:: +Grants the privileges required to access the cluster through the Java Transport +Client. The Java Transport Client fetches information about the nodes in the +cluster using the _Node Liveness API_ and the _Cluster State API_ (when +sniffing is enabled). Assign your users this role if they use the +Transport Client. ++ +NOTE: Using the Transport Client effectively means the users are granted access +to the cluster state. This means users can view the metadata of all indices, +index templates, mappings, nodes, and basically everything about the cluster. +However, this role does not grant permission to view the data in all indices. + +[[built-in-roles-watcher-admin]] `watcher_admin`:: ++ +Grants write access to the `.watches` index, read access to the watch history and +the triggered watches index, and allows executing all watcher actions. + +[[built-in-roles-watcher-user]] `watcher_user`:: ++ +Grants read access to the `.watches` index, the get watch action, and the watcher +stats. \ No newline at end of file diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc new file mode 100644 index 0000000000000..83edef1a67ba4 --- /dev/null +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -0,0 +1,175 @@ +[role="xpack"] +[[defining-roles]] +=== Defining roles + +A role is defined by the following JSON structure: + +[source,js] +----- +{ + "run_as": [ ... ], <1> + "cluster": [ ... ], <2> + "indices": [ ... ] <3> +} +----- +<1> A list of usernames the owners of this role can <>. +<2> A list of cluster privileges. These privileges define the + cluster level actions users with this role are able to execute. This field + is optional (missing `cluster` privileges effectively mean no cluster level + permissions).
+<3> A list of indices permissions entries. This field is optional (missing `indices` + privileges effectively mean no index level permissions). + +[[valid-role-name]] +NOTE: Role names must be at least 1 and no more than 1024 characters. They can + contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, + punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. + Leading or trailing whitespace is not allowed. + +The following describes the structure of an indices permissions entry: + +[source,js] +------- +{ + "names": [ ... ], <1> + "privileges": [ ... ], <2> + "field_security" : { ... }, <3> + "query": "..." <4> +} +------- +<1> A list of indices (or index name patterns) to which the permissions in this + entry apply. +<2> The index level privileges the owners of the role have on the associated + indices (those indices that are specified in the `names` field). +<3> Specification for document fields the owners of the role have read access to. + See <> for details. +<4> A search query that defines the documents the owners of the role have read + access to. A document within the associated indices must match this query + in order for it to be accessible by the owners of the role. + +[TIP] +============================================================================== +When specifying index names, you can use indices and aliases with their full +names or regular expressions that refer to multiple indices. + +* Wildcard (default) - simple wildcard matching where `*` is a placeholder + for zero or more characters, `?` is a placeholder for a single character + and `\` may be used as an escape character. + +* Regular Expressions - A more powerful syntax for matching more complex + patterns. This regular expression is based on Lucene's regexp automaton + syntax. To enable this syntax, it must be wrapped within a pair of + forward slashes (`/`). Any pattern starting with `/` and not ending with + `/` is considered to be malformed. + +.Example Regular Expressions +[source,yaml] +------------------------------------------------------------------------------ +"foo-bar": # match the literal `foo-bar` +"foo-*": # match anything beginning with "foo-" +"logstash-201?-*": # ? matches any one character +"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019 +"/foo": # syntax error - missing final / +------------------------------------------------------------------------------ +============================================================================== + +The following snippet shows an example definition of a `clicks_admin` role: + +[source,js] +----------- +{ + "run_as": [ "clicks_watcher_1" ], + "cluster": [ "monitor" ], + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + }, + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +----------- + +Based on the above definition, users owning the `clicks_admin` role can: + + * Impersonate the `clicks_watcher_1` user and execute requests on its behalf. + * Monitor the {es} cluster + * Read data from all indices prefixed with `events-` + * Within these indices, only read the events of the `click` category + * Within these documents, only read the `category`, `@timestamp` and `message` + fields. + +TIP: For a complete list of the available privileges, see <>. + +There are two available mechanisms to define roles: using the _Role Management APIs_ +or local files on the {es} nodes.
{security} also supports implementing +custom roles providers. If you need to integrate with another system to retrieve +user roles, you can build a custom roles provider plugin. For more information, +see <>. + +[float] +[[roles-management-ui]] +=== Role management UI + +{security} enables you to easily manage users and roles from within {kib}. To +manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. + +[float] +[[roles-management-api]] +=== Role management API + +The _Role Management APIs_ enable you to add, update, remove and retrieve roles +dynamically. When you use the APIs to manage roles in the `native` realm, the +roles are stored in an internal {es} index. For more information and examples, +see {ref}/security-api-roles.html[Role Management APIs]. + +[float] +[[roles-management-file]] +=== File-based role management + +Apart from the _Role Management APIs_, roles can also be defined in a local +`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each +role definition is keyed by its name. + +[IMPORTANT] +============================== +If the same role name is used in the `roles.yml` file and through the +_Role Management APIs_, the role found in the file will be used. +============================== + +While the _Role Management APIs_ are the preferred mechanism to define roles, +using the `roles.yml` file becomes useful if you want to define fixed roles that +no one (besides an administrator with physical access to the {es} nodes) +would be able to change. + +[IMPORTANT] +============================== +The `roles.yml` file is managed locally by the node and not globally by the +cluster. This means that with a typical multi-node cluster, the exact same +changes need to be applied on each and every node in the cluster. + +A safer approach would be to apply the change on one of the nodes and have the +`roles.yml` distributed/copied to all other nodes in the cluster (either +manually or using a configuration management system such as Puppet or Chef). +============================== + +The following snippet shows an example of the `roles.yml` file configuration: + +[source,yaml] +----------------------------------- +click_admins: + run_as: [ 'clicks_watcher_1' ] + cluster: [ 'monitor' ] + indices: + - names: [ 'events-*' ] + privileges: [ 'read' ] + field_security: + grant: ['category', '@timestamp', 'message' ] + query: '{"match": {"category": "click"}}' +----------------------------------- + +{security} continuously monitors the `roles.yml` file and automatically picks +up and applies any changes to it. diff --git a/x-pack/docs/en/security/authorization/overview.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc index 9dc8185db4d34..98a1ad8b786b6 100644 --- a/x-pack/docs/en/security/authorization/overview.asciidoc +++ b/x-pack/docs/en/security/authorization/overview.asciidoc @@ -49,295 +49,11 @@ As an administrator, you will need to define the roles that you want to use, then assign users to the roles. These can be assigned to users in a number of ways depending on the realms by which the users are authenticated. -[[built-in-roles]] -=== Built-in roles +include::built-in-roles.asciidoc[] -{security} applies a default role to all users, including -<>. The default role enables users to access -the authenticate endpoint, change their own passwords, and get information about -themselves. +include::managing-roles.asciidoc[] -{security} also provides a set of built-in roles you can explicitly assign -to users.
These roles have a fixed set of privileges and cannot be updated. - -[[built-in-roles-ingest-user]] `ingest_admin` :: -Grants access to manage *all* index templates and *all* ingest pipeline configurations. -+ -NOTE: This role does *not* provide the ability to create indices; those privileges -must be defined in a separate role. - -[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` :: -Grants access to the {kib} Dashboard and read-only permissions on the `.kibana` -index. This role does not have access to editing tools in {kib}. For more -information, see -{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. - -[[built-in-roles-kibana-system]] `kibana_system` :: -Grants access necessary for the {kib} system user to read from and write to the -{kib} indices, manage index templates, and check the availability of the {es} cluster. -This role grants read access to the `.monitoring-*` indices and read and write access -to the `.reporting-*` indices. For more information, see -{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. -+ -NOTE: This role should not be assigned to users as the granted permissions may -change between releases. - -[[built-in-roles-kibana-user]] `kibana_user`:: -Grants the minimum privileges required for any user of {kib}. This role grants -access to the {kib} indices and grants monitoring privileges for the cluster. - -[[built-in-roles-logstash-admin]] `logstash_admin` :: -Grants access to the `.logstash*` indices for managing configurations. - -[[built-in-roles-logstash-system]] `logstash_system` :: -Grants access necessary for the Logstash system user to send system-level data -(such as monitoring) to {es}. For more information, see -{logstash-ref}/ls-security.html[Configuring Security in Logstash]. -+ -NOTE: This role should not be assigned to users as the granted permissions may -change between releases. -+ -NOTE: This role does not provide access to the logstash indices and is not -suitable for use within a Logstash pipeline. - -[[built-in-roles-beats-system]] `beats_system` :: -Grants access necessary for the Beats system user to send system-level data -(such as monitoring) to {es}. -+ -NOTE: This role should not be assigned to users as the granted permissions may -change between releases. -+ -NOTE: This role does not provide access to the beats indices and is not -suitable for writing beats output to {es}. - -[[built-in-roles-ml-admin]] `machine_learning_admin`:: -Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices. - -[[built-in-roles-ml-user]] `machine_learning_user`:: -Grants the minimum privileges required to view {xpackml} configuration, -status, and results. This role grants `monitor_ml` cluster privileges and -read access to the `.ml-notifications` and `.ml-anomalies*` indices, -which store {ml} results. - -[[built-in-roles-monitoring-user]] `monitoring_user`:: -Grants the minimum privileges required for any user of {monitoring} other than those -required to use {kib}. This role grants access to the monitoring indices and grants -privileges necessary for reading basic cluster information. Monitoring users should -also be assigned the `kibana_user` role. - -[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`:: -Grants the minimum privileges required for a remote monitoring agent to write data -into this cluster. 
- -[[built-in-roles-reporting-user]] `reporting_user`:: -Grants the specific privileges required for users of {reporting} other than those -required to use {kib}. This role grants access to the reporting indices. Reporting -users should also be assigned the `kibana_user` role and a role that grants them -access to the data that will be used to generate reports with. - -[[built-in-roles-superuser]] `superuser`:: -Grants full access to the cluster, including all indices and data. A user with -the `superuser` role can also manage users and roles and -<> any other user in the system. Due to the -permissive nature of this role, take extra care when assigning it to a user. - -[[built-in-roles-transport-client]] `transport_client`:: -Grants the privileges required to access the cluster through the Java Transport -Client. The Java Transport Client fetches information about the nodes in the -cluster using the _Node Liveness API_ and the _Cluster State API_ (when -sniffing is enabled). Assign your users this role if they use the -Transport Client. -+ -NOTE: Using the Transport Client effectively means the users are granted access -to the cluster state. This means users can view the metadata over all indices, -index templates, mappings, node and basically everything about the cluster. -However, this role does not grant permission to view the data in all indices. - -[[built-in-roles-watcher-admin]] `watcher_admin`:: -+ -Grants write access to the `.watches` index, read access to the watch history and -the triggered watches index and allows to execute all watcher actions. - -[[built-in-roles-watcher-user]] `watcher_user`:: -+ -Grants read access to the `.watches` index, the get watch action and the watcher -stats. - - -[[defining-roles]] -=== Defining roles - -A role is defined by the following JSON structure: - -[source,js] ------ -{ - "run_as": [ ... ], <1> - "cluster": [ ... ], <2> - "indices": [ ... ] <3> -} ------ -<1> A list of usernames the owners of this role can <>. -<2> A list of cluster privileges. These privileges define the - cluster level actions users with this role are able to execute. This field - is optional (missing `cluster` privileges effectively mean no cluster level - permissions). -<3> A list of indices permissions entries. This field is optional (missing `indices` - privileges effectively mean no index level permissions). - -[[valid-role-name]] -NOTE: Role names must be at least 1 and no more than 1024 characters. They can - contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, - punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. - Leading or trailing whitespace is not allowed. - -The following describes the structure of an indices permissions entry: - -[source,js] -------- -{ - "names": [ ... ], <1> - "privileges": [ ... ], <2> - "field_security" : { ... }, <3> - "query": "..." <4> -} -------- -<1> A list of indices (or index name patterns) to which the permissions in this - entry apply. -<2> The index level privileges the owners of the role have on the associated - indices (those indices that are specified in the `name` field) -<3> Specification for document fields the owners of the role have read access to. - See <> for details. -<4> A search query that defines the documents the owners of the role have read - access to. A document within the associated indices must match this query - in order for it to be accessible by the owners of the role. 
- -[TIP] -============================================================================== -When specifying index names, you can use indices and aliases with their full -names or regular expressions that refer to multiple indices. - -* Wildcard (default) - simple wildcard matching where `*` is a placeholder - for zero or more characters, `?` is a placeholder for a single character - and `\` may be used as an escape character. - -* Regular Expressions - A more powerful syntax for matching more complex - patterns. This regular expression is based on Lucene's regexp automaton - syntax. To enable this syntax, it must be wrapped within a pair of - forward slashes (`/`). Any pattern starting with `/` and not ending with - `/` is considered to be malformed. - -.Example Regular Expressions -[source,yaml] ------------------------------------------------------------------------------- -"foo-bar": # match the literal `foo-bar` -"foo-*": # match anything beginning with "foo-" -"logstash-201?-*": # ? matches any one character -"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019 -"/foo": # syntax error - missing final / ------------------------------------------------------------------------------- -============================================================================== - -The following snippet shows an example definition of a `clicks_admin` role: - -[source,js] ------------ -{ - "run_as": [ "clicks_watcher_1" ] - "cluster": [ "monitor" ], - "indices": [ - { - "names": [ "events-*" ], - "privileges": [ "read" ], - "field_security" : { - "grant" : [ "category", "@timestamp", "message" ] - }, - "query": "{\"match\": {\"category\": \"click\"}}" - } - ] -} ------------ - -Based on the above definition, users owning the `clicks_admin` role can: - - * Impersonate the `clicks_watcher_1` user and execute requests on its behalf. - * Monitor the {es} cluster - * Read data from all indices prefixed with `events-` - * Within these indices, only read the events of the `click` category - * Within these document, only read the `category`, `@timestamp` and `message` - fields. - -TIP: For a complete list of available <> - -There are two available mechanisms to define roles: using the _Role Management APIs_ -or in local files on the {es} nodes. {security} also supports implementing -custom roles providers. If you need to integrate with another system to retrieve -user roles, you can build a custom roles provider plugin. For more information, -see <>. - -[float] -[[roles-management-ui]] -=== Role management UI - -{security} enables you to easily manage users and roles from within {kib}. To -manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. - -[float] -[[roles-management-api]] -=== Role management API - -The _Role Management APIs_ enable you to add, update, remove and retrieve roles -dynamically. When you use the APIs to manage roles in the `native` realm, the -roles are stored in an internal {es} index. For more information and examples, -see {ref}/security-api-roles.html[Role Management APIs]. - -[float] -[[roles-management-file]] -=== File-based role management - -Apart from the _Role Management APIs_, roles can also be defined in local -`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each -role definition is keyed by its name. - -[IMPORTANT] -============================== -If the same role name is used in the `roles.yml` file and through the -_Role Management APIs_, the role found in the file will be used. 
-============================== - -While the _Role Management APIs_ is the preferred mechanism to define roles, -using the `roles.yml` file becomes useful if you want to define fixed roles that -no one (beside an administrator having physical access to the {es} nodes) -would be able to change. - -[IMPORTANT] -============================== -The `roles.yml` file is managed locally by the node and is not globally by the -cluster. This means that with a typical multi-node cluster, the exact same -changes need to be applied on each and every node in the cluster. - -A safer approach would be to apply the change on one of the nodes and have the -`roles.yml` distributed/copied to all other nodes in the cluster (either -manually or using a configuration management system such as Puppet or Chef). -============================== - -The following snippet shows an example of the `roles.yml` file configuration: - -[source,yaml] ------------------------------------ -click_admins: - run_as: [ 'clicks_watcher_1' ] - cluster: [ 'monitor' ] - indices: - - names: [ 'events-*' ] - privileges: [ 'read' ] - field_security: - grant: ['category', '@timestamp', 'message' ] - query: '{"match": {"category": "click"}}' ------------------------------------ - -{security} continuously monitors the `roles.yml` file and automatically picks -up and applies any changes to it. +include::privileges.asciidoc[] include::alias-privileges.asciidoc[] diff --git a/x-pack/docs/en/security/reference/privileges.asciidoc b/x-pack/docs/en/security/authorization/privileges.asciidoc similarity index 97% rename from x-pack/docs/en/security/reference/privileges.asciidoc rename to x-pack/docs/en/security/authorization/privileges.asciidoc index b467b58283d97..e5b22d3674a76 100644 --- a/x-pack/docs/en/security/reference/privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/privileges.asciidoc @@ -1,10 +1,11 @@ +[role="xpack"] [[security-privileges]] -=== Security Privileges +=== Security privileges This section lists the privileges that you can assign to a role. [[privileges-list-cluster]] -==== Cluster Privileges +==== Cluster privileges [horizontal] `all`:: @@ -66,7 +67,7 @@ All privileges necessary for a transport client to connect. Required by the rem cluster to enable <>. [[privileges-list-indices]] -==== Indices Privileges +==== Indices privileges [horizontal] `all`:: @@ -125,7 +126,7 @@ Privilege to create an index. A create index request may contain aliases to be added to the index once created. In that case the request requires the `manage` privilege as well, on both the index and the aliases names. -==== Run As Privilege +==== Run as privilege The `run_as` permission enables an authenticated user to submit requests on behalf of another user. 
The value can be a user name or a comma-separated list diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc index 21138138cfbf9..9c65fd6479a4f 100644 --- a/x-pack/docs/en/security/reference.asciidoc +++ b/x-pack/docs/en/security/reference.asciidoc @@ -7,6 +7,4 @@ * {ref}/security-api.html[Security API] * {ref}/xpack-commands.html[Security Commands] -include::reference/privileges.asciidoc[] - include::reference/files.asciidoc[] From 8ff9baeb823b2d377b833569b291be2aacc26679 Mon Sep 17 00:00:00 2001 From: lcawl Date: Wed, 16 May 2018 13:11:06 -0700 Subject: [PATCH 03/25] [DOCS] Fixes list of unconverted snippets in build.gradle --- x-pack/docs/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index ede446d6074f8..314ffced4a0f7 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -81,7 +81,7 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/watcher/stats.asciidoc', - 'en/security/authorization/overview.asciidoc', + 'en/security/authorization/managing-roles.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] From f0da3da6b0e842374bd63b6f9d69dd55eea774a8 Mon Sep 17 00:00:00 2001 From: Shashwat Anand Date: Thu, 17 May 2018 01:56:23 +0530 Subject: [PATCH 04/25] Reindex: Fixed typo in assertion failure message (#30619) Fix a typo in an assertion failure message. --- .../java/org/elasticsearch/index/reindex/RestReindexAction.java | 2 +- .../org/elasticsearch/index/reindex/RestReindexActionTests.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index f218d6ae8dfaa..f1ac681b59fdf 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -115,7 +115,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client @Override protected ReindexRequest buildRequest(RestRequest request) throws IOException { if (request.hasParam("pipeline")) { - throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parmaeter. " + throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parameter. " + "Specify it in the [dest] object instead."); } ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index 1c33ccdaaa289..88fa31f423a21 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -149,7 +149,7 @@ public void testPipelineQueryParameterIsError() throws IOException { request.withParams(singletonMap("pipeline", "doesn't matter")); Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build())); - assertEquals("_reindex doesn't support [pipeline] as a query parmaeter. 
Specify it in the [dest] object instead.", e.getMessage()); + assertEquals("_reindex doesn't support [pipeline] as a query parameter. Specify it in the [dest] object instead.", e.getMessage()); } public void testSetScrollTimeout() throws IOException { From 01bdfcde6f7b4d33b4ac37475f65a9bcbd5e963a Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 16 May 2018 23:35:23 +0300 Subject: [PATCH 05/25] [ML] DeleteExpiredDataAction should use client with origin (#30646) This is an admin action that should be allowed to operate on ML indices with full permissions. --- .../ml/action/TransportDeleteExpiredDataAction.java | 3 ++- .../xpack/ml/job/retention/ExpiredForecastsRemover.java | 4 ++++ .../ml/job/retention/ExpiredModelSnapshotsRemover.java | 4 ++++ .../xpack/ml/job/retention/ExpiredResultsRemover.java | 9 +++++---- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index 6cf06695c7c91..0e1ca9dd9aec3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.retention.ExpiredForecastsRemover; @@ -40,7 +41,7 @@ public TransportDeleteExpiredDataAction(Settings settings, ThreadPool threadPool Client client, ClusterService clusterService) { super(settings, DeleteExpiredDataAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, DeleteExpiredDataAction.Request::new); - this.client = client; + this.client = ClientHelper.clientWithOrigin(client, ClientHelper.ML_ORIGIN); this.clusterService = clusterService; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index 30c49a834be58..75deb7bf0ae6d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -45,6 +45,10 @@ * Removes up to {@link #MAX_FORECASTS} forecasts (stats + forecasts docs) that have expired. * A forecast is deleted if its expiration timestamp is earlier * than the start of the current day (local time-zone). + * + * This is expected to be used by actions requiring admin rights. Thus, + * it is also expected that the provided client will be a client with the + * ML origin so that permissions to manage ML indices are met. 
*/ public class ExpiredForecastsRemover implements MlDataRemover { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 3b1105774ea66..8808ed34277a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -34,6 +34,10 @@ * of their respective job with the exception of the currently used snapshot. * A snapshot is deleted if its timestamp is earlier than the start of the * current day (local time-zone) minus the retention period. + * + * This is expected to be used by actions requiring admin rights. Thus, + * it is also expected that the provided client will be a client with the + * ML origin so that permissions to manage ML indices are met. */ public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 3f0ca4558b570..f59fdddedecdb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -33,14 +33,15 @@ import java.time.format.DateTimeFormatter; import java.util.Objects; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; - /** * Removes all results that have expired the configured retention time * of their respective job. A result is deleted if its timestamp is earlier * than the start of the current day (local time-zone) minus the retention * period. + * + * This is expected to be used by actions requiring admin rights. Thus, + * it is also expected that the provided client will be a client with the + * ML origin so that permissions to manage ML indices are met. 
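+ *
+ * A minimal sketch of that wiring (illustrative only: the real call site is
+ * TransportDeleteExpiredDataAction, shown elsewhere in this patch, and the
+ * constructor arguments of the remover are elided here):
+ *
+ * <pre>
+ * // wrap the node client so requests run with the ML origin
+ * Client origin = ClientHelper.clientWithOrigin(client, ClientHelper.ML_ORIGIN);
+ * ExpiredResultsRemover remover = new ExpiredResultsRemover(origin, ...);
+ * </pre>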
*/ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { @@ -65,7 +66,7 @@ protected void removeDataBefore(Job job, long cutoffEpochMs, ActionListener() { + client.execute(DeleteByQueryAction.INSTANCE, request, new ActionListener() { @Override public void onResponse(BulkByScrollResponse bulkByScrollResponse) { try { From e0ccd4b816595bd3c71875653aa868ef84f5db45 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 16 May 2018 16:34:48 -0400 Subject: [PATCH 06/25] Mute ShrinkIndexIT This is tracked at https://issues.apache.org/jira/browse/LUCENE-8318 --- .../action/admin/indices/create/ShrinkIndexIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index e48f151081f62..d89a8a134ff7c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -83,6 +84,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); } + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8318") public void testCreateShrinkIndexToN() { int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}}; int[] shardSplits = randomFrom(possibleShardSplits); From a4c9c2fa2ae67e89d1080c674f6f790ccb2e27f2 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 16 May 2018 15:35:57 -0700 Subject: [PATCH 07/25] Make xpack modules instead of a meta plugin (#30589) This commit removes xpack from being a meta-plugin-as-a-module. It also fixes a couple tests which were missing task dependencies, which failed once the gradle execution order changed. 
--- distribution/archives/build.gradle | 2 +- distribution/build.gradle | 11 +++--- x-pack/plugin/build.gradle | 10 ++---- x-pack/plugin/core/src/main/bin/x-pack-env | 2 +- .../plugin/core/src/main/bin/x-pack-env.bat | 2 +- .../security/src/main/bin/x-pack-security-env | 2 +- .../src/main/bin/x-pack-security-env.bat | 2 +- .../watcher/src/main/bin/x-pack-watcher-env | 2 +- .../src/main/bin/x-pack-watcher-env.bat | 2 +- x-pack/qa/vagrant/build.gradle | 35 ------------------- .../test/resources/packaging/utils/xpack.bash | 9 ----- 11 files changed, 13 insertions(+), 66 deletions(-) diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 9fa06021236a2..5d1703399aad4 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -224,7 +224,7 @@ subprojects { doLast { // this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines final List expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003") - final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt") + final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack-ml/NOTICE.txt") final List actualLines = Files.readAllLines(noticePath) for (final String expectedLine : expectedLines) { if (actualLines.contains(expectedLine) == false) { diff --git a/distribution/build.gradle b/distribution/build.gradle index 266cb8f8b270a..d2e2810bc7eec 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -201,17 +201,14 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { // use licenses from each of the bundled xpack plugins Project xpack = project(':x-pack:plugin') -xpack.subprojects.findAll { it.name != 'bwc' }.each { Project xpackSubproject -> - File licenses = new File(xpackSubproject.projectDir, 'licenses') +xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> + File licenses = new File(xpackModule.projectDir, 'licenses') if (licenses.exists()) { buildDefaultNotice.licensesDir licenses } + copyModule(processDefaultOutputs, xpackModule) + copyLog4jProperties(buildDefaultLog4jConfig, xpackModule) } -// but copy just the top level meta plugin to the default modules -copyModule(processDefaultOutputs, xpack) -copyLog4jProperties(buildDefaultLog4jConfig, xpack) - -// // make sure we have a clean task since we aren't a java project, but we have tasks that // put stuff in the build dir diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index e4dc314eb72a7..4a0b29c42582a 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -8,17 +8,11 @@ import java.nio.file.Path import java.nio.file.StandardCopyOption import org.elasticsearch.gradle.test.RunTask; -apply plugin: 'elasticsearch.es-meta-plugin' +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' archivesBaseName = 'x-pack' -es_meta_plugin { - name = 'x-pack' - description = 'Elasticsearch Expanded Pack Plugin' - plugins = ['core', 'deprecation', 'graph', 'logstash', - 'ml', 'monitoring', 'security', 'upgrade', 'watcher', 'sql', 'rollup'] -} - dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/core/src/main/bin/x-pack-env 
b/x-pack/plugin/core/src/main/bin/x-pack-env index fb5489cfebc43..cc25b86f69873 100644 --- a/x-pack/plugin/core/src/main/bin/x-pack-env +++ b/x-pack/plugin/core/src/main/bin/x-pack-env @@ -5,4 +5,4 @@ # you may not use this file except in compliance with the Elastic License. # include x-pack-core jars in classpath -ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-core/*" +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-core/*" diff --git a/x-pack/plugin/core/src/main/bin/x-pack-env.bat b/x-pack/plugin/core/src/main/bin/x-pack-env.bat index de45a53c9269c..fc97721a737d1 100644 --- a/x-pack/plugin/core/src/main/bin/x-pack-env.bat +++ b/x-pack/plugin/core/src/main/bin/x-pack-env.bat @@ -2,4 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one rem or more contributor license agreements. Licensed under the Elastic License; rem you may not use this file except in compliance with the Elastic License. -set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-core/* +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-core/* diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env b/x-pack/plugin/security/src/main/bin/x-pack-security-env index fd35535be8cca..3a2b15e13fa4a 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env @@ -7,4 +7,4 @@ source "`dirname "$0"`"/x-pack-env # include x-pack-security jars in classpath -ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-security/*" +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-security/*" diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat index 610f5835d28c2..035f1c965ffb6 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat @@ -4,4 +4,4 @@ rem you may not use this file except in compliance with the Elastic License. call "%~dp0x-pack-env.bat" || exit /b 1 -set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-security/* +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-security/* diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env index 4abe3d8c60761..13718a01b4330 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env @@ -7,4 +7,4 @@ source "`dirname "$0"`"/x-pack-env # include x-pack-security jars in classpath -ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-watcher/*" +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-watcher/*" diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat index 9e43ffaa0521f..010c154eb5a39 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat @@ -4,4 +4,4 @@ rem you may not use this file except in compliance with the Elastic License. 
call "%~dp0x-pack-env.bat" || exit /b 1 -set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-watcher/* +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-watcher/* diff --git a/x-pack/qa/vagrant/build.gradle b/x-pack/qa/vagrant/build.gradle index 0c3428f258c0e..c69214578fd16 100644 --- a/x-pack/qa/vagrant/build.gradle +++ b/x-pack/qa/vagrant/build.gradle @@ -11,41 +11,6 @@ esvagrant { } dependencies { - // Packaging tests use the x-pack meta plugin - packaging project(path: xpackProject('plugin').path, configuration: 'zip') - // Inherit Bats test utils from :qa:vagrant project packaging project(path: ':qa:vagrant', configuration: 'packaging') } - -Map> metaPlugins = [:] -for (Project metaPlugin : project.rootProject.subprojects) { - if (metaPlugin.plugins.hasPlugin(MetaPluginBuildPlugin)) { - MetaPluginPropertiesExtension extension = metaPlugin.extensions.findByName('es_meta_plugin') - if (extension != null) { - List plugins = [] - metaPlugin.subprojects.each { - if (extension.plugins.contains(it.name)) { - Project plugin = (Project) it - if (plugin.plugins.hasPlugin(PluginBuildPlugin)) { - PluginPropertiesExtension esplugin = plugin.extensions.findByName('esplugin') - if (esplugin != null) { - plugins.add(esplugin.name) - } - } - } - } - metaPlugins.put(extension.name, plugins.toSorted()) - } - } -} - -setupPackagingTest { - doLast { - metaPlugins.each{ name, plugins -> - File expectedMetaPlugins = file("build/plugins/${name}.expected") - expectedMetaPlugins.parentFile.mkdirs() - expectedMetaPlugins.setText(plugins.join('\n'), 'UTF-8') - } - } -} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash index 95ab2a08d3e57..3e44ee9f83a58 100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash +++ b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash @@ -69,15 +69,6 @@ verify_xpack_installation() { done # nocommit: decide whether to check the files added by the distribution, not part of xpack... #assert_number_of_files "$ESCONFIG/" $configFilesCount - - # Read the $name.expected file that contains all the expected - # plugins for the meta plugin - while read plugin; do - assert_module_or_plugin_directory "$ESMODULES/$name/$plugin" - assert_file_exist "$ESMODULES/$name/$plugin/$plugin"*".jar" - assert_file_exist "$ESMODULES/$name/$plugin/plugin-descriptor.properties" - assert_file_exist "$ESMODULES/$name/$plugin/plugin-security.policy" - done Date: Wed, 16 May 2018 16:42:08 -0700 Subject: [PATCH 08/25] [test] packaging: add windows boxes (#30402) Adds windows server 2012r2 and 2016 vagrant boxes to packaging tests. They can only be used if IDs for their images are specified, which are passed to gradle and then to vagrant via env variables. Adds options to the project property `vagrant.boxes` to choose between linux and windows boxes. Bats tests are run only on linux boxes, and portable packaging tests run on all boxes. Platform tests are only run on linux boxes since they are not being maintained. For #26741 --- TESTING.asciidoc | 155 +++++++---- Vagrantfile | 39 +++ .../gradle/vagrant/VagrantTestPlugin.groovy | 261 +++++++++++++----- 3 files changed, 331 insertions(+), 124 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 2e4a6ede754ad..15cdfdd1c52ba 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -303,15 +303,16 @@ comma separated list of nodes to connect to (e.g. localhost:9300). 
A transport client will be created based on that and used for all the before|after test operations, and to extract the http addresses of the nodes so that REST requests can be sent to them. -== Testing scripts +== Testing packaging -The simplest way to test scripts and the packaged distributions is to use -Vagrant. You can get started by following there five easy steps: +The packaging tests use Vagrant virtual machines to verify that installing +and running elasticsearch distributions works correctly on supported operating systems. +These tests should really only be run in Vagrant VMs because they're destructive. . Install VirtualBox and Vagrant. -. (Optional) Install vagrant-cachier to squeeze a bit more performance out of -the process: +. (Optional) Install https://github.com/fgrehm/vagrant-cachier[vagrant-cachier] to squeeze +a bit more performance out of the process: -------------------------------------- vagrant plugin install vagrant-cachier -------------------------------------- . Download and smoke test the VMs with `./gradlew vagrantSmokeTest` or `./gradlew -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will -download the base images and provision the boxes and immediately quit. If you -you this again it'll skip the download step. +download the base images and provision the boxes and immediately quit. Downloading all +the images may take a long time. After the images are already on your machine, they won't +be downloaded again unless they have been updated to a new version. . Run the tests with `./gradlew packagingTest`. This will cause Gradle to build the tar, zip, and deb packages and all the plugins. It will then run the tests on ubuntu-1404 and centos-7. We chose those two distributions as the default because they cover deb and rpm packaging and SysVinit and systemd. -You can run on all the VMs by running `./gradlew -Pvagrant.boxes=all -packagingTest`. You can run a particular VM with a command like `./gradlew --Pvagrant.boxes=oel-7 packagingTest`. See `./gradlew tasks` for a complete list -of available vagrant boxes for testing. It's important to know that if you -interrupt any of these Gradle commands then the boxes will remain running and -you'll have to terminate them with `./gradlew stop`. +You can choose which boxes to test by setting the `-Pvagrant.boxes` project property. All of +the valid options for this property are: + +* `sample` - The default, only chooses ubuntu-1404 and centos-7 +* List of box names, comma separated (e.g. `oel-7,fedora-26`) - Chooses exactly the boxes listed. +* `linux-all` - All linux boxes. +* `windows-all` - All Windows boxes. If there are any Windows boxes which do not +have images available when this value is provided, the build will fail. +* `all` - All boxes we test. If there are any boxes (e.g. Windows) which do not have images +available when this value is provided, the build will fail. + +For a complete list of boxes on which tests can be run, run `./gradlew :qa:vagrant:listAllBoxes`. +For a list of boxes that have images available from your configuration, run +`./gradlew :qa:vagrant:listAvailableBoxes`. + +Note that if you interrupt gradle in the middle of running these tasks, any boxes started +will remain running and you'll have to stop them manually with `./gradlew stop` or +`vagrant halt`. All the regular vagrant commands should just work so you can get a shell in a VM running trusty by running `vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404`.
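+For example, to run the packaging tests against just two specific boxes, combine the
+`vagrant.boxes` property described above with the test task (the box names here are an
+illustrative selection; any names from the list below work the same way):
+
+--------------------------------------------
+./gradlew -Pvagrant.boxes=oel-7,fedora-26 packagingTest
+--------------------------------------------
+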
-These are the linux flavors the Vagrantfile currently supports: +These are the linux flavors supported, all of which we provide images for: * ubuntu-1404 aka trusty * ubuntu-1604 aka xenial @@ -364,9 +378,42 @@ quality boxes available in vagrant atlas: * sles-11 -We're missing the following because our tests are very linux/bash centric: +=== Testing packaging on Windows + +The packaging tests also support Windows Server 2012R2 and Windows Server 2016. +Unfortunately we're not able to provide boxes for them in open source use +because of licensing issues. Any VirtualBox image that has WinRM and PowerShell +enabled for remote users should work. + +Testing on Windows requires the https://github.com/criteo/vagrant-winrm[vagrant-winrm] plugin. + +------------------------------------ +vagrant plugin install vagrant-winrm +------------------------------------ + +Specify the image IDs of the Windows boxes to gradle with the following project +properties. They can be set in `~/.gradle/gradle.properties` like -* Windows Server 2012 +------------------------------------ +vagrant.windows-2012r2.id=my-image-id +vagrant.windows-2016.id=another-image-id +------------------------------------ + +or passed on the command line like `-Pvagrant.windows-2012r2.id=my-image-id` +`-Pvagrant.windows-2016.id=another-image-id` + +These properties are required for Windows support in all gradle tasks that +handle packaging tests. Either or both may be specified. Remember that to run tests +on these boxes, the project property `vagrant.boxes` still needs to be set to a +value that will include them. + +If you're running vagrant commands outside of gradle, specify the Windows boxes +with the environment variables + +* `VAGRANT_WINDOWS_2012R2_BOX` +* `VAGRANT_WINDOWS_2016_BOX` + +=== Testing VMs are disposable It's important to think of VMs like cattle. If they become lame you just shoot them and let vagrant reprovision them. Say you've hosed your precise VM: @@ -399,54 +446,62 @@ vagrant destroy -f `vagrant up` would normally start all the VMs but we've prevented that because that'd consume a ton of ram. -== Testing scripts more directly +=== Iterating on packaging tests -In general its best to stick to testing in vagrant because the bats scripts are -destructive. When working with a single package it's generally faster to run its -tests in a tighter loop than Gradle provides. In one window: +Running the packaging tests through gradle can take a while because it will start +and stop the VM each time. You can iterate faster by keeping the VM up and running +the tests directly. --------------------------------- -./gradlew :distribution:packages:rpm:assemble --------------------------------- +The packaging tests use a random seed to determine which past version to use for +testing upgrades. To use a single past version, fix the test seed when running +the commands below (see <>) -and in another window: +First build the packaging tests and their dependencies ----------------------------------------------------- -vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7 -cd $PACKAGING_ARCHIVES -sudo -E bats $BATS_TESTS/*rpm*.bats ----------------------------------------------------- +-------------------------------------------- +./gradlew :qa:vagrant:setupPackagingTest +-------------------------------------------- -If you wanted to retest all the release artifacts on a single VM you could: +Then choose the VM you want to test on and bring it up. For example, to bring +up Debian 9 use the gradle command below.
Bringing the box up with vagrant directly +may not mount the packaging test project in the right place. Once the VM is up, ssh +into it -------------------------------------------------- -./gradlew setupPackagingTest -cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404 +-------------------------------------------- +./gradlew :qa:vagrant:vagrantDebian9#up +vagrant ssh debian-9 +-------------------------------------------- + +Now inside the VM, to run the https://github.com/sstephenson/bats[bats] packaging tests + +-------------------------------------------- cd $PACKAGING_ARCHIVES -sudo -E bats $BATS_TESTS/*.bats -------------------------------------------------- -You can also use Gradle to prepare the test environment and then starts a single VM: +# runs all bats tests +sudo bats $BATS_TESTS/*.bats -------------------------------------------------- -./gradlew vagrantFedora27#up -------------------------------------------------- +# you can also pass specific test files +sudo bats $BATS_TESTS/20_tar_package.bats $BATS_TESTS/25_tar_plugins.bats +-------------------------------------------- -Or any of vagrantCentos6#up, vagrantCentos7#up, vagrantDebian8#up, -vagrantDebian9#up, vagrantFedora26#up, vagrantFedora27#up, vagrantOel6#up, vagrantOel7#up, -vagrantOpensuse42#up,vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up. +To run the Java packaging tests, again inside the VM -Once up, you can then connect to the VM using SSH from the elasticsearch directory: +-------------------------------------------- +bash $PACKAGING_TESTS/run-tests.sh +-------------------------------------------- -------------------------------------------------- -vagrant ssh fedora-27 -------------------------------------------------- +or on Windows -Or from another directory: +-------------------------------------------- +powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 +-------------------------------------------- -------------------------------------------------- -VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-27 -------------------------------------------------- +When you've made changes you want to test, keep the VM up and reload the tests and +distributions inside by running (on the host) + +-------------------------------------------- +./gradlew :qa:vagrant:clean :qa:vagrant:setupPackagingTest +-------------------------------------------- Note: Starting a vagrant VM outside of the elasticsearch folder requires indicating the folder that contains the Vagrantfile using the VAGRANT_CWD diff --git a/Vagrantfile b/Vagrantfile index 6761fec07dab2..1c259c1125f00 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -121,6 +121,26 @@ Vagrant.configure(2) do |config| sles_common config, box end end + + windows_2012r2_box = ENV['VAGRANT_WINDOWS_2012R2_BOX'] + if windows_2012r2_box && windows_2012r2_box.empty? == false + 'windows-2012r2'.tap do |box| + config.vm.define box, define_opts do |config| + config.vm.box = windows_2012r2_box + windows_common config, box + end + end + end + + windows_2016_box = ENV['VAGRANT_WINDOWS_2016_BOX'] + if windows_2016_box && windows_2016_box.empty?
== false + 'windows-2016'.tap do |box| + config.vm.define box, define_opts do |config| + config.vm.box = windows_2016_box + windows_common config, box + end + end + end end def deb_common(config, name, extra: '') @@ -353,3 +373,22 @@ SUDOERS_VARS chmod 0440 /etc/sudoers.d/elasticsearch_vars SHELL end + +def windows_common(config, name) + config.vm.provision 'markerfile', type: 'shell', inline: <<-SHELL + $ErrorActionPreference = "Stop" + New-Item C:/is_vagrant_vm -ItemType file -Force | Out-Null + SHELL + + config.vm.provision 'set prompt', type: 'shell', inline: <<-SHELL + $ErrorActionPreference = "Stop" + $ps_prompt = 'function Prompt { "#{name}:$($ExecutionContext.SessionState.Path.CurrentLocation)>" }' + $ps_prompt | Out-File $PsHome/Microsoft.PowerShell_profile.ps1 + SHELL + + config.vm.provision 'set env variables', type: 'shell', inline: <<-SHELL + $ErrorActionPreference = "Stop" + [Environment]::SetEnvironmentVariable("PACKAGING_ARCHIVES", "C:/project/build/packaging/archives", "Machine") + [Environment]::SetEnvironmentVariable("PACKAGING_TESTS", "C:/project/build/packaging/tests", "Machine") + SHELL +end diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index bb85359ae3f07..72d71f25f69f2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -13,10 +13,12 @@ import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec import org.gradle.api.tasks.TaskState +import static java.util.Collections.unmodifiableList + class VagrantTestPlugin implements Plugin { - /** All available boxes **/ - static List BOXES = [ + /** All Linux boxes that we test. 
These are all always supplied **/ + static final List LINUX_BOXES = unmodifiableList([ 'centos-6', 'centos-7', 'debian-8', @@ -29,26 +31,35 @@ class VagrantTestPlugin implements Plugin { 'sles-12', 'ubuntu-1404', 'ubuntu-1604' - ] + ]) + + /** All Windows boxes that we test, which may or may not be supplied **/ + static final List WINDOWS_BOXES = unmodifiableList([ + 'windows-2012r2', + 'windows-2016' + ]) + + /** All boxes that we test, some of which may not be supplied **/ + static final List ALL_BOXES = unmodifiableList(LINUX_BOXES + WINDOWS_BOXES) /** Boxes used when sampling the tests **/ - static List SAMPLE = [ + static final List SAMPLE = unmodifiableList([ 'centos-7', - 'ubuntu-1404', - ] + 'ubuntu-1404' + ]) /** All distributions to bring into test VM, whether or not they are used **/ - static List DISTRIBUTIONS = [ + static final List DISTRIBUTIONS = unmodifiableList([ 'archives:tar', 'archives:oss-tar', 'packages:rpm', 'packages:oss-rpm', 'packages:deb', 'packages:oss-deb' - ] + ]) /** Packages onboarded for upgrade tests **/ - static List UPGRADE_FROM_ARCHIVES = ['rpm', 'deb'] + static final List UPGRADE_FROM_ARCHIVES = unmodifiableList(['rpm', 'deb']) private static final PACKAGING_CONFIGURATION = 'packaging' private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest' @@ -56,11 +67,19 @@ class VagrantTestPlugin implements Plugin { private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest" + /** Boxes that have been supplied and are available for testing **/ + List availableBoxes = [] + + /** extra env vars to pass to vagrant for box configuration **/ + Map vagrantBoxEnvVars = [:] + @Override void apply(Project project) { + collectAvailableBoxes(project) + // Creates the Vagrant extension for the project - project.extensions.create('esvagrant', VagrantPropertiesExtension, listVagrantBoxes(project)) + project.extensions.create('esvagrant', VagrantPropertiesExtension, listSelectedBoxes(project)) // Add required repositories for packaging tests configurePackagingArchiveRepositories(project) @@ -73,12 +92,17 @@ class VagrantTestPlugin implements Plugin { createVagrantTasks(project) if (project.extensions.esvagrant.boxes == null || project.extensions.esvagrant.boxes.size() == 0) { - throw new InvalidUserDataException('Vagrant boxes cannot be null or empty for esvagrant') + throw new InvalidUserDataException('Must specify at least one vagrant box') } for (String box : project.extensions.esvagrant.boxes) { - if (BOXES.contains(box) == false) { - throw new InvalidUserDataException("Vagrant box [${box}] not found, available virtual machines are ${BOXES}") + if (ALL_BOXES.contains(box) == false) { + throw new InvalidUserDataException("Vagrant box [${box}] is unknown to this plugin. Valid boxes are ${ALL_BOXES}") + } + + if (availableBoxes.contains(box) == false) { + throw new InvalidUserDataException("Vagrant box [${box}] is not available because an image is not supplied for it. 
" + + "Available boxes with supplied images are ${availableBoxes}") } } @@ -86,14 +110,45 @@ class VagrantTestPlugin implements Plugin { createVagrantBoxesTasks(project) } - private List listVagrantBoxes(Project project) { + /** + * Enumerate all the boxes that we know about and could possibly choose to test + */ + private void collectAvailableBoxes(Project project) { + // these images are hardcoded in the Vagrantfile and are always available + availableBoxes.addAll(LINUX_BOXES) + + // these images need to be provided at runtime + String windows_2012r2_box = project.getProperties().get('vagrant.windows-2012r2.id') + if (windows_2012r2_box != null && windows_2012r2_box.isEmpty() == false) { + availableBoxes.add('windows-2012r2') + vagrantBoxEnvVars['VAGRANT_WINDOWS_2012R2_BOX'] = windows_2012r2_box + } + + String windows_2016_box = project.getProperties().get('vagrant.windows-2016.id') + if (windows_2016_box != null && windows_2016_box.isEmpty() == false) { + availableBoxes.add('windows-2016') + vagrantBoxEnvVars['VAGRANT_WINDOWS_2016_BOX'] = windows_2016_box + } + } + + /** + * Enumerate all the boxes that we have chosen to test + */ + private static List listSelectedBoxes(Project project) { String vagrantBoxes = project.getProperties().get('vagrant.boxes', 'sample') - if (vagrantBoxes == 'sample') { - return SAMPLE - } else if (vagrantBoxes == 'all') { - return BOXES - } else { - return vagrantBoxes.split(',') + switch (vagrantBoxes) { + case 'sample': + return SAMPLE + case 'linux-all': + return LINUX_BOXES + case 'windows-all': + return WINDOWS_BOXES + case 'all': + return ALL_BOXES + case '': + return [] + default: + return vagrantBoxes.split(',') } } @@ -184,11 +239,19 @@ class VagrantTestPlugin implements Plugin { from project.configurations[PACKAGING_TEST_CONFIGURATION] } - Task createTestRunnerScript = project.tasks.create('createTestRunnerScript', FileContentsTask) { + Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) { dependsOn copyPackagingTests file "${testsDir}/run-tests.sh" contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}" } + Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { + dependsOn copyPackagingTests + file "${testsDir}/run-tests.ps1" + contents """\ + java -cp "\$Env:PACKAGING_TESTS/*" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass} + exit \$LASTEXITCODE + """ + } Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) { dependsOn copyPackagingArchives @@ -249,20 +312,24 @@ class VagrantTestPlugin implements Plugin { } Task vagrantSetUpTask = project.tasks.create('setupPackagingTest') - vagrantSetUpTask.dependsOn 'vagrantCheckVersion' - vagrantSetUpTask.dependsOn copyPackagingArchives, copyPackagingTests, createTestRunnerScript - vagrantSetUpTask.dependsOn createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile - vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils + vagrantSetUpTask.dependsOn( + 'vagrantCheckVersion', + copyPackagingArchives, + copyPackagingTests, + createLinuxRunnerScript, + createWindowsRunnerScript, + createVersionFile, + createUpgradeFromFile, + createUpgradeIsOssFile, + copyBatsTests, + copyBatsUtils + ) } private static void createPackagingTestTask(Project project) { project.tasks.create('packagingTest') { group 'Verification' - description "Tests yum/apt packages using vagrant and bats.\n" + - " Specify the vagrant 
boxes to test using the gradle property 'vagrant.boxes'.\n" + - " 'sample' can be used to test a single yum and apt box. 'all' can be used to\n" + - " test all available boxes. The available boxes are: \n" + - " ${BOXES}" + description "Tests distribution installation on different platforms using vagrant. See TESTING.asciidoc for details." dependsOn 'vagrantCheckVersion' } } @@ -270,24 +337,49 @@ class VagrantTestPlugin implements Plugin { private static void createPlatformTestTask(Project project) { project.tasks.create('platformTest') { group 'Verification' - description "Test unit and integ tests on different platforms using vagrant.\n" + - " Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" + - " 'all' can be used to test all available boxes. The available boxes are: \n" + - " ${BOXES}" + description "Test unit and integ tests on different platforms using vagrant. See TESTING.asciidoc for details. This test " + + "is unmaintained." dependsOn 'vagrantCheckVersion' } } - private static void createVagrantTasks(Project project) { + private void createBoxListTasks(Project project) { + project.tasks.create('listAllBoxes') { + group 'Verification' + description 'List all vagrant boxes which can be tested by this plugin' + doLast { + println("All vagrant boxes supported by ${project.path}") + for (String box : ALL_BOXES) { + println(box) + } + } + dependsOn 'vagrantCheckVersion' + } + + project.tasks.create('listAvailableBoxes') { + group 'Verification' + description 'List all vagrant boxes which are available for testing' + doLast { + println("All vagrant boxes available to ${project.path}") + for (String box : availableBoxes) { + println(box) + } + } + dependsOn 'vagrantCheckVersion' + } + } + + private void createVagrantTasks(Project project) { createCleanTask(project) createStopTask(project) createSmokeTestTask(project) createPrepareVagrantTestEnvTask(project) createPackagingTestTask(project) createPlatformTestTask(project) + createBoxListTasks(project) } - private static void createVagrantBoxesTasks(Project project) { + private void createVagrantBoxesTasks(Project project) { assert project.extensions.esvagrant.boxes != null assert project.tasks.stop != null @@ -320,9 +412,10 @@ class VagrantTestPlugin implements Plugin { 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" ] + vagrantEnvVars.putAll(vagrantBoxEnvVars) // Each box gets it own set of tasks - for (String box : BOXES) { + for (String box : availableBoxes) { String boxTask = box.capitalize().replace('-', '') // always add a halt task for all boxes, so clean makes sure they are all shutdown @@ -363,6 +456,7 @@ class VagrantTestPlugin implements Plugin { final Task destroy = project.tasks.create("vagrant${boxTask}#destroy", LoggedExec) { commandLine "bash", "-c", "vagrant status ${box} | grep -q \"${box}\\s\\+not created\" || vagrant destroy ${box} --force" workingDir project.rootProject.rootDir + environment vagrantEnvVars } destroy.onlyIf { vagrantDestroy } update.mustRunAfter(destroy) @@ -386,37 +480,42 @@ class VagrantTestPlugin implements Plugin { environment vagrantEnvVars dependsOn up finalizedBy halt - commandLine 'vagrant', 'ssh', box, '--command', - "set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/ ${box}: /'" } vagrantSmokeTest.dependsOn(smoke) - - Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { - remoteCommand BATS_TEST_COMMAND - boxName box - environmentVars 
vagrantEnvVars - dependsOn up, setupPackagingTest - finalizedBy halt + if (LINUX_BOXES.contains(box)) { + smoke.commandLine = ['vagrant', 'ssh', box, '--command', + "set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/ ${box}: /'"] + } else { + smoke.commandLine = ['vagrant', 'winrm', box, '--command', + "Write-Host ' ${box}: Hello from ${project.path}'"] } - TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path) - batsPackagingTest.doFirst { - project.gradle.addListener(batsPackagingReproListener) - } - batsPackagingTest.doLast { - project.gradle.removeListener(batsPackagingReproListener) - } - if (project.extensions.esvagrant.boxes.contains(box)) { - packagingTest.dependsOn(batsPackagingTest) + if (LINUX_BOXES.contains(box)) { + Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { + remoteCommand BATS_TEST_COMMAND + boxName box + environmentVars vagrantEnvVars + dependsOn up, setupPackagingTest + finalizedBy halt + } + + TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path) + batsPackagingTest.doFirst { + project.gradle.addListener(batsPackagingReproListener) + } + batsPackagingTest.doLast { + project.gradle.removeListener(batsPackagingReproListener) + } + if (project.extensions.esvagrant.boxes.contains(box)) { + packagingTest.dependsOn(batsPackagingTest) + } } Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) { - command 'ssh' boxName box environmentVars vagrantEnvVars dependsOn up, setupPackagingTest finalizedBy halt - args '--command', "bash \"\$PACKAGING_TESTS/run-tests.sh\"" } // todo remove this onlyIf after all packaging tests are consolidated @@ -424,6 +523,14 @@ class VagrantTestPlugin implements Plugin { project.extensions.esvagrant.testClass != null } + if (LINUX_BOXES.contains(box)) { + javaPackagingTest.command = 'ssh' + javaPackagingTest.args = ['--command', 'bash "$PACKAGING_TESTS/run-tests.sh"'] + } else { + javaPackagingTest.command = 'winrm' + javaPackagingTest.args = ['--command', 'powershell -File "$Env:PACKAGING_TESTS/run-tests.ps1"'] + } + TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path) javaPackagingTest.doFirst { project.gradle.addListener(javaPackagingReproListener) @@ -435,23 +542,29 @@ class VagrantTestPlugin implements Plugin { packagingTest.dependsOn(javaPackagingTest) } - Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { - command 'ssh' - boxName box - environmentVars vagrantEnvVars - dependsOn up - finalizedBy halt - args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" - } - TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) - platform.doFirst { - project.gradle.addListener(platformReproListener) - } - platform.doLast { - project.gradle.removeListener(platformReproListener) - } - if (project.extensions.esvagrant.boxes.contains(box)) { - platformTest.dependsOn(platform) + /* + * This test is unmaintained and was created to run on Linux. 
We won't allow it to run on Windows + * until it's been brought back into maintenance + */ + if (LINUX_BOXES.contains(box)) { + Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { + command 'ssh' + boxName box + environmentVars vagrantEnvVars + dependsOn up + finalizedBy halt + args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" + } + TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) + platform.doFirst { + project.gradle.addListener(platformReproListener) + } + platform.doLast { + project.gradle.removeListener(platformReproListener) + } + if (project.extensions.esvagrant.boxes.contains(box)) { + platformTest.dependsOn(platform) + } } } } From 8a89306aafc6cefc1a6886233036387ad67118ae Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Thu, 17 May 2018 18:27:18 +1000 Subject: [PATCH 09/25] Preserve REST client auth despite 401 response (#30558) The default behaviour for Apache HTTP client is to mimic the standard browser behaviour of clearing the authentication cache (for a given host) if that host responds with 401. This behaviour is appropriate in an interactive browser environment where the user is given the opportunity to provide alternative credentials, but it is not the preferred behaviour for the ES REST client. X-Pack may respond with a 401 status if a request is made before the node/cluster has recovered sufficient state to know how to handle the provided authentication credentials - for example the security index needs to be recovered before we can authenticate native users. In these cases the correct behaviour is to retry with the same credentials (rather than discarding those credentials). --- ...tentCredentialsAuthenticationStrategy.java | 59 +++++++++++++++++++ .../client/RestClientBuilder.java | 3 +- .../RestClientSingleHostIntegTests.java | 50 ++++++++++++---- 3 files changed, 99 insertions(+), 13 deletions(-) create mode 100644 client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java diff --git a/client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java b/client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java new file mode 100644 index 0000000000000..4ae22fbe3728e --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ * + * + */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpHost; +import org.apache.http.auth.AuthScheme; +import org.apache.http.impl.client.TargetAuthenticationStrategy; +import org.apache.http.protocol.HttpContext; + +/** + * An {@link org.apache.http.client.AuthenticationStrategy} implementation that does not perform + * any special handling if authentication fails. + * The default handler in Apache HTTP client mimics standard browser behaviour of clearing authentication + * credentials if it receives a 401 response from the server. While this can be useful for browsers, it is + * rarely the desired behaviour with the Elasticsearch REST API. + * If the code using the REST client has configured credentials for the REST API, then we can and should + * assume that this is intentional, and those credentials represent the best possible authentication + * mechanism to the Elasticsearch node. + * If we receive a 401 status, a probable cause is that the authentication mechanism in place was unable + * to perform the requisite password checks (the node has not yet recovered its state, or an external + * authentication provider was unavailable). + * If this occurs, then the desired behaviour is for the REST client to retry with the same credentials + * (rather than trying with no credentials, or expecting the calling code to provide alternate credentials). + */ +final class PersistentCredentialsAuthenticationStrategy extends TargetAuthenticationStrategy { + + private final Log logger = LogFactory.getLog(PersistentCredentialsAuthenticationStrategy.class); + + @Override + public void authFailed(HttpHost host, AuthScheme authScheme, HttpContext context) { + if (logger.isDebugEnabled()) { + logger.debug("Authentication to " + host + " failed (scheme: " + authScheme.getSchemeName() + + "). Preserving credentials for next request"); + } + // Do nothing.
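+ // (This override is intentionally a no-op; the class javadoc above explains why the credentials stay cached.)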
// The superclass implementation of this method would clear the credentials from the cache, but we don't want that here. + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 8768c07161989..5f7831c67fc28 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -204,7 +204,8 @@ private CloseableHttpAsyncClient createHttpClient() { HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create().setDefaultRequestConfig(requestConfigBuilder.build()) //default settings for connection pooling may be too constraining .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) - .setSSLContext(SSLContext.getDefault()); + .setSSLContext(SSLContext.getDefault()) + .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 667e38a5167d7..35cac627bbe6a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -31,14 +31,14 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.TargetAuthenticationStrategy; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; +import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.After; -import org.junit.AfterClass; import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.io.InputStreamReader; @@ -147,6 +147,8 @@ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder h if (usePreemptiveAuth == false) { // disable preemptive auth by ignoring any authcache httpClientBuilder.disableAuthCaching(); + // don't use the "persistent credentials strategy" + httpClientBuilder.setTargetAuthenticationStrategy(new TargetAuthenticationStrategy()); } return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); @@ -193,7 +195,7 @@ public void onFailure(Exception exception) { assertTrue("timeout waiting for requests to be sent", latch.await(10, TimeUnit.SECONDS)); if (exceptions.isEmpty() == false) { AssertionError error = new AssertionError("expected no failures but got some.
see suppressed for first 10 of [" - + exceptions.size() + "] failures"); + + exceptions.size() + "] failures"); for (Exception exception : exceptions.subList(0, Math.min(10, exceptions.size()))) { error.addSuppressed(exception); } @@ -217,7 +219,7 @@ public void testHeaders() throws IOException { Response esResponse; try { esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); - } catch(ResponseException e) { + } catch (ResponseException e) { esResponse = e.getResponse(); } @@ -291,8 +293,8 @@ public void testEncodeParams() throws IOException { /** * Verify that credentials are sent on the first request with preemptive auth enabled (default when provided with credentials). */ - public void testPreemptiveAuthEnabled() throws IOException { - final String[] methods = { "POST", "PUT", "GET", "DELETE" }; + public void testPreemptiveAuthEnabled() throws IOException { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; try (RestClient restClient = createRestClient(true, true)) { for (final String method : methods) { @@ -306,8 +308,8 @@ public void testPreemptiveAuthEnabled() throws IOException { /** * Verify that credentials are not sent on the first request with preemptive auth disabled. */ - public void testPreemptiveAuthDisabled() throws IOException { - final String[] methods = { "POST", "PUT", "GET", "DELETE" }; + public void testPreemptiveAuthDisabled() throws IOException { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; try (RestClient restClient = createRestClient(true, false)) { for (final String method : methods) { @@ -318,12 +320,31 @@ public void testPreemptiveAuthDisabled() throws IOException { } } + /** + * Verify that credentials continue to be sent even if a 401 (Unauthorized) response is received + */ + public void testAuthCredentialsAreNotClearedOnAuthChallenge() throws IOException { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; + + try (RestClient restClient = createRestClient(true, true)) { + for (final String method : methods) { + Header realmHeader = new BasicHeader("WWW-Authenticate", "Basic realm=\"test\""); + final Response response401 = bodyTest(restClient, method, 401, new Header[]{realmHeader}); + assertThat(response401.getHeader("Authorization"), startsWith("Basic")); + + final Response response200 = bodyTest(restClient, method, 200, new Header[0]); + assertThat(response200.getHeader("Authorization"), startsWith("Basic")); + } + } + + } + public void testUrlWithoutLeadingSlash() throws Exception { if (pathPrefix.length() == 0) { try { restClient.performRequest("GET", "200"); fail("request should have failed"); - } catch(ResponseException e) { + } catch (ResponseException e) { assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); } } else { @@ -335,8 +356,8 @@ public void testUrlWithoutLeadingSlash() throws Exception { { //pathPrefix is not required to start with '/', will be added automatically try (RestClient restClient = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) - .setPathPrefix(pathPrefix.substring(1)).build()) { + new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) + .setPathPrefix(pathPrefix.substring(1)).build()) { Response response = restClient.performRequest("GET", "200"); //a trailing slash gets automatically added if a pathPrefix is configured assertEquals(200, response.getStatusLine().getStatusCode()); @@ -350,10 +371,15 @@ private Response 
bodyTest(final String method) throws IOException { } private Response bodyTest(final RestClient restClient, final String method) throws IOException { - String requestBody = "{ \"field\": \"value\" }"; int statusCode = randomStatusCode(getRandom()); + return bodyTest(restClient, method, statusCode, new Header[0]); + } + + private Response bodyTest(RestClient restClient, String method, int statusCode, Header[] headers) throws IOException { + String requestBody = "{ \"field\": \"value\" }"; Request request = new Request(method, "/" + statusCode); request.setJsonEntity(requestBody); + request.setHeaders(headers); Response esResponse; try { esResponse = restClient.performRequest(request); From 7915b5f7aaa1f5f67051dd699dfb2eb8d74c6bd1 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 17 May 2018 10:57:25 +0200 Subject: [PATCH 10/25] [Tests] Add debug information to CorruptedFileIT This test failed but the cause is not obvious. This commit adds more debug logging traces so that if it reproduces we could gather more information. Related #30577 --- .../blobstore/BlobStoreRepository.java | 4 +--- .../index/store/CorruptedFileIT.java | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index fcbc54efbf780..ba4b68b20b246 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1147,6 +1147,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { // TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) here but we should final Collection fileNames; try { + logger.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit); metadata = store.getMetadata(snapshotIndexCommit); fileNames = snapshotIndexCommit.getFileNames(); } catch (IOException e) { @@ -1242,9 +1243,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { /** * Snapshot individual file - *

- * This is asynchronous method. Upon completion of the operation latch is getting counted down and any failures are - * added to the {@code failures} list * * @param fileInfo file to be snapshotted */ diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index bbfa56a0e55fe..59156c91558c7 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -470,7 +470,7 @@ protected void sendRequest(Connection connection, long requestId, String action, * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard. */ - @TestLogging("org.elasticsearch.monitor.fs:DEBUG") + @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE") public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException { int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); @@ -494,6 +494,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I assertHitCount(countResponse, numDocs); ShardRouting shardRouting = corruptRandomPrimaryFile(false); + logger.info("--> shard {} has a corrupted file", shardRouting); // we don't corrupt segments.gen since S/R doesn't snapshot this file // the other problem here why we can't corrupt segments.X files is that the snapshot flushes again before // it snapshots and that will write a new segments.X+1 file @@ -504,9 +505,12 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); - logger.info("failed during snapshot -- maybe SI file got corrupted"); + final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test") + .get(); + final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); + logger.info("--> snapshot terminated with state " + snapshotState); final List<Path> files = listShardFiles(shardRouting); Path corruptedFile = null; for (Path file : files) { @@ -515,6 +519,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I break; } } + if (snapshotState != SnapshotState.PARTIAL) { + logger.info("--> listing shard files for investigation"); + files.forEach(f -> logger.info("path: {}", f.toAbsolutePath())); + } + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); assertThat(corruptedFile, notNullValue()); } From 2ac1f9fe89f70e5a3eed084e25504102498763b3 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 17 May 2018 10:58:25 +0200 Subject: [PATCH 11/25] Fix _cluster/state to always return cluster_uuid (#30656) Since #30143, the Cluster State API should always return the current cluster_uuid in the response body, regardless of the
metrics filters. This is not exactly true, as it is returned only if the metadata metric is requested and no specific indices are specified. This commit fixes the behavior to always return the cluster_uuid and adds a new test. --- .../test/cluster.state/20_filtering.yml | 18 +++++++++++++--- .../state/TransportClusterStateAction.java | 19 +++++++++---------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml index 880efaff19aa6..861e1200991b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml @@ -163,12 +163,24 @@ setup: version: " - 6.3.99" reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher" + # Get the current cluster_uuid + - do: + cluster.state: {} + - set: { metadata.cluster_uuid : cluster_uuid } + - do: cluster.state: - metric: [ master_node, version, metadata ] + metric: [ master_node, version ] - - is_true: cluster_uuid + - match: { cluster_uuid: $cluster_uuid } - is_true: master_node - is_true: version - is_true: state_uuid - - is_true: metadata + + - do: + cluster.state: + metric: [ routing_table ] + index: testidx + + - match: { cluster_uuid: $cluster_uuid } + - is_true: routing_table diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 58d6a299bb936..22e71e0a85f49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -98,14 +98,11 @@ protected void masterOperation(final ClusterStateRequest request, final ClusterS if (request.blocks()) { builder.blocks(currentState.blocks()); } - if (request.metaData()) { - MetaData.Builder mdBuilder; - if (request.indices().length == 0) { - mdBuilder = MetaData.builder(currentState.metaData()); - } else { - mdBuilder = MetaData.builder(); - } + MetaData.Builder mdBuilder = MetaData.builder(); + mdBuilder.clusterUUID(currentState.metaData().clusterUUID()); + + if (request.metaData()) { if (request.indices().length > 0) { String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); for (String filteredIndex : indices) { @@ -114,17 +111,19 @@ protected void masterOperation(final ClusterStateRequest request, final ClusterS mdBuilder.put(indexMetaData, false); } } + } else { + mdBuilder = MetaData.builder(currentState.metaData()); } // Filter our metadata that shouldn't be returned by API - for(ObjectObjectCursor custom : currentState.metaData().customs()) { if(!custom.value.context().contains(MetaData.XContentContext.API)) { mdBuilder.removeCustom(custom.key); } } - - builder.metaData(mdBuilder); } + builder.metaData(mdBuilder); + if (request.customs()) { for (ObjectObjectCursor custom : currentState.customs()) { if (custom.value.isPrivate() == false) { From 11d776ecf06517707a12e556f4d0a9cd292d3c0b Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 17 May 2018 11:57:54 +0200 Subject: [PATCH 12/25] Watcher: Fix watch history template for dynamic slack attachments (#30172) The part of
the history template responsible for slack attachments had a dynamic mapping configured which could lead to problems, when a string value looking like a date was configured in the value field of an attachment. This commit fixes the template by setting this field always to text. This also requires a change in the template numbering to be sure this will be applied properly when starting watcher. --- .../WatcherIndexTemplateRegistryField.java | 3 ++- .../src/main/resources/watch-history.json | 7 ++++++ .../rest-api-spec/test/slack/10_slack.yml | 23 +++++++++++++++++-- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java index 4cf0898bae2ff..25e2c928d9a57 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java @@ -12,8 +12,9 @@ public final class WatcherIndexTemplateRegistryField { // version 3: include watch status in history // version 6: upgrade to ES 6, removal of _status field // version 7: add full exception stack traces for better debugging + // version 8: fix slack attachment property not to be dynamic, causing field type issues // Note: if you change this, also inform the kibana team around the watcher-ui - public static final String INDEX_TEMPLATE_VERSION = "7"; + public static final String INDEX_TEMPLATE_VERSION = "8"; public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION; public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches"; public static final String WATCHES_TEMPLATE_NAME = ".watches"; diff --git a/x-pack/plugin/core/src/main/resources/watch-history.json b/x-pack/plugin/core/src/main/resources/watch-history.json index a26305b35542a..d158281c264d2 100644 --- a/x-pack/plugin/core/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/src/main/resources/watch-history.json @@ -507,6 +507,13 @@ "properties" : { "color" : { "type" : "keyword" + }, + "fields" : { + "properties" : { + "value" : { + "type" : "text" + } + } } } } diff --git a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml index 3b04ba716759a..259bc9e1d25af 100644 --- a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml +++ b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml @@ -16,7 +16,13 @@ }, "input": { "simple": { - "foo" : "something from input" + "foo" : "something from input", + "hits" : { + "hits" : [ + { "_source" : { "name" : "first", "value" : "2018-04-26T11:45:12.518Z" } }, + { "_source" : { "name" : "second", "value" : "anything" } } + ] + } } }, "actions": { @@ -49,7 +55,20 @@ } ] } - ] + ], + "dynamic_attachments" : { + "list_path" : "ctx.payload.hits.hits", + "attachment_template" : { + "title": "Title", + "fields" : [ + { + "title" : "Field title {{_source.name}}", + "value" : "{{_source.value}}", + "short" : true + } + ] + } + } } } } From b6340658f41dead30a5ded6d03506032a3eb8100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 17 May 2018 12:52:22 +0200 Subject: [PATCH 13/25] 
Deprecate `nGram` and `edgeNGram` names for ngram filters (#30209) The camel case name `nGram` should be removed in favour of `ngram` and similar for `edgeNGram` and `edge_ngram`. Before removal, we need to deprecate the camel case names first. This change logs deprecation warnings when the camel case names are used on indices created with version 6.4.0 and higher. --- .../analysis/common/CommonAnalysisPlugin.java | 21 +++- .../common/CommonAnalysisPluginTests.java | 119 ++++++++++++++++++ .../common/NGramTokenizerFactoryTests.java | 21 +--- sdfhksldj | 0 .../analysis/PreConfiguredTokenFilter.java | 9 ++ 5 files changed, 146 insertions(+), 24 deletions(-) create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java create mode 100644 sdfhksldj diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index c9b48f0c8650d..624194092a02e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -237,9 +237,14 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input -> new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE))); - // TODO deprecate edgeNGram - filters.add(PreConfiguredTokenFilter.singleton("edgeNGram", false, input -> - new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE))); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation", + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead."); + } + return new EdgeNGramTokenFilter(reader, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE); + })); filters.add(PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES))); filters.add(PreConfiguredTokenFilter.singleton("french_stem", false, input -> new SnowballFilter(input, new FrenchStemmer()))); @@ -256,8 +261,14 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new)); - // TODO deprecate nGram - filters.add(PreConfiguredTokenFilter.singleton("nGram", false, NGramTokenFilter::new)); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation", + "The [nGram] token filter name is deprecated and will be removed in a future version.
" + + "Please change the filter name to [ngram] instead."); + } + return new NGramTokenFilter(reader); + })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("reverse", false, ReverseStringFilter::new)); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java new file mode 100644 index 0000000000000..1d2b8a36810eb --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.MockTokenizer; +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + +public class CommonAnalysisPluginTests extends ESTestCase { + + /** + * Check that the deprecated name "nGram" issues a deprecation warning for indices created since 6.3.0 + */ + public void testNGramDeprecationWarning() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + assertWarnings( + "The [nGram] token filter name is deprecated and will be removed in a future version. 
" + + "Please change the filter name to [ngram] instead."); + } + } + + /** + * Check that the deprecated name "nGram" does NOT issues a deprecation warning for indices created before 6.4.0 + */ + public void testNGramNoDeprecationWarningPre6_4() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + } + } + + /** + * Check that the deprecated name "edgeNGram" issues a deprecation warning for indices created since 6.3.0 + */ + public void testEdgeNGramDeprecationWarning() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + assertWarnings( + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" + + "Please change the filter name to [edge_ngram] instead."); + } + } + + /** + * Check that the deprecated name "edgeNGram" does NOT issues a deprecation warning for indices created before 6.4.0 + */ + public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + } + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java index 078e0a9cb9ed3..1cf6ef4696d04 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java @@ -32,15 +32,11 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; import java.util.Arrays; -import java.util.List; -import java.util.Random; import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; import static org.hamcrest.Matchers.instanceOf; @@ -129,7 +125,7 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception { for (int i = 0; i < iters; i++) { final Index index = new Index("test", "_na_"); final String name = "ngr"; - Version v = randomVersion(random()); + Version v = VersionUtils.randomVersion(random()); Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3); boolean reverse = random().nextBoolean(); if (reverse) { @@ -150,7 +146,6 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception { } } - /*` * test that throws an error when trying to get a NGramTokenizer where difference between max_gram and min_gram * is greater than the allowed value of max_ngram_diff @@ -175,16 +170,4 @@ public void testMaxNGramDiffException() throws Exception{ + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + "] index level setting.", ex.getMessage()); } - - private Version randomVersion(Random random) throws IllegalArgumentException, IllegalAccessException { - Field[] declaredFields = Version.class.getFields(); - List versionFields = new ArrayList<>(); - for (Field field : declaredFields) { - if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getName().startsWith("V_") && field.getType() == Version.class) { - versionFields.add(field); - } - } - return (Version) versionFields.get(random.nextInt(versionFields.size())).get(Version.class); - } - } diff --git a/sdfhksldj b/sdfhksldj new file mode 100644 index 
0000000000000..e69de29bb2d1d diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java index 777fb589c9db0..12130e856f32a 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java @@ -41,6 +41,15 @@ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterF (tokenStream, version) -> create.apply(tokenStream)); } + /** + * Create a pre-configured token filter that may not vary at all, but has access to the version the index was created on. + */ + public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, + BiFunction<TokenStream, Version, TokenStream> create) { + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, CachingStrategy.ONE, + (tokenStream, version) -> create.apply(tokenStream, version)); + } + /** * Create a pre-configured token filter that may vary based on the Lucene version. */ From 3dfa93ef7cdad14061f5d8a4b285059f35bb2f6a Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Thu, 17 May 2018 07:09:18 -0400 Subject: [PATCH 14/25] Improve explanation in rescore (#30629) Currently in a rescore request, if window_size is smaller than the top N documents returned (N=size), the explanation of scores could be incorrect for documents that were part of the topN but not part of rescoring. This PR corrects this by saving in RescoreContext the docIDs of documents for which rescoring was applied, and adding a rescoring explanation only for these docIDs. Closes #28725 --- .../test/search/210_rescore_explain.yml | 39 +++++++++++++++++++ .../search/rescore/QueryRescorer.java | 34 +++++++++------- .../search/rescore/RescoreContext.java | 11 ++++++ 3 files changed, 69 insertions(+), 15 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml
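Worked through with the numbers used in the REST test below: three match_all hits score 1.0, query_weight is 5, rescore_query_weight is 10, and window_size is 2, so only the top two documents go through the second pass and end up at 5*1 + 10*1 = 15, while the third keeps 5*1 = 5. After the fix the explanation claims the second pass only for documents that actually saw it. A small standalone sketch of that gating (plain Java; the rescoredDocs set plays the role of RescoreContext):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class RescoreExplainSketch {
        public static void main(String[] args) {
            // Mirrors the REST test: 3 match_all hits (score 1.0), window_size 2,
            // query_weight 5, rescore_query_weight 10, default score mode "total" (sum).
            float queryWeight = 5f;
            float rescoreQueryWeight = 10f;
            Set<Integer> rescoredDocs = new HashSet<>(Arrays.asList(0, 1)); // only the top window_size docs

            for (int doc = 0; doc < 3; doc++) {
                float firstPass = 1.0f * queryWeight;
                if (rescoredDocs.contains(doc)) {
                    // The fix: only documents that actually went through the second pass
                    // get a second-pass explanation.
                    float secondPass = 1.0f * rescoreQueryWeight;
                    System.out.println("doc " + doc + ": sum of [" + firstPass + ", " + secondPass + "] = " + (firstPass + secondPass));
                } else {
                    System.out.println("doc " + doc + ": " + firstPass + " (first pass only)");
                }
            }
            // Prints 15.0, 15.0, 5.0 - the values asserted by the test below.
        }
    }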
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml new file mode 100644 index 0000000000000..24920580c4552 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml @@ -0,0 +1,39 @@ +--- +"Score should match explanation in rescore": + - skip: + version: " - 6.99.99" + reason: Explanation for rescoring was corrected after these versions + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "_doc", "_id": "1"}}' + - '{"f1": "1"}' + - '{"index": {"_index": "test_index", "_type": "_doc", "_id": "2"}}' + - '{"f1": "2"}' + - '{"index": {"_index": "test_index", "_type": "_doc", "_id": "3"}}' + - '{"f1": "3"}' + + - do: + search: + index: test_index + body: + explain: true + query: + match_all: {} + rescore: + window_size: 2 + query: + rescore_query: + match_all: {} + query_weight: 5 + rescore_query_weight: 10 + + - match: { hits.hits.0._score: 15 } + - match: { hits.hits.0._explanation.value: 15 } + + - match: { hits.hits.1._score: 15 } + - match: { hits.hits.1._explanation.value: 15 } + + - match: { hits.hits.2._score: 5 } + - match: { hits.hits.2._explanation.value: 5 } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index d4cf05d542560..4a9567a32c06a 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -30,6 +30,8 @@ import java.util.Arrays; import java.util.Comparator; import java.util.Set; +import java.util.Collections; +import static java.util.stream.Collectors.toSet; public final class QueryRescorer implements Rescorer { @@ -61,6 +63,11 @@ protected float combine(float firstPassScore, boolean secondPassMatches, float s // First take top slice of incoming docs, to be rescored: TopDocs topNFirstPass = topN(topDocs, rescoreContext.getWindowSize()); + // Save doc IDs for which rescoring was applied to be used in score explanation + Set<Integer> topNDocIDs = Collections.unmodifiableSet( + Arrays.stream(topNFirstPass.scoreDocs).map(scoreDoc -> scoreDoc.doc).collect(toSet())); + rescoreContext.setRescoredDocs(topNDocIDs); + // Rescore them: TopDocs rescored = rescorer.rescore(searcher, topNFirstPass, rescoreContext.getWindowSize()); @@ -71,16 +78,12 @@ protected float combine(float firstPassScore, boolean secondPassMatches, float s @Override public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreContext rescoreContext, Explanation sourceExplanation) throws IOException { - QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext; if (sourceExplanation == null) { // this should not happen but just in case return Explanation.noMatch("nothing matched"); } - // TODO: this isn't right? I.e., we are incorrectly pretending all first pass hits were rescored? If the requested docID was - // beyond the top rescoreContext.window() in the first pass hits, we don't rescore it now? - Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId); + QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext; float primaryWeight = rescore.queryWeight(); - Explanation prim; if (sourceExplanation.isMatch()) { prim = Explanation.match( @@ -89,23 +92,24 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon } else { prim = Explanation.noMatch("First pass did not match", sourceExplanation); } - - // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. Maybe - // we should add QueryRescorer.explainCombine to Lucene? - if (rescoreExplain != null && rescoreExplain.isMatch()) { - float secondaryWeight = rescore.rescoreQueryWeight(); - Explanation sec = Explanation.match( + if (rescoreContext.isRescored(topLevelDocId)){ + Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId); + // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. + // Maybe we should add QueryRescorer.explainCombine to Lucene? 
+ if (rescoreExplain != null && rescoreExplain.isMatch()) { + float secondaryWeight = rescore.rescoreQueryWeight(); + Explanation sec = Explanation.match( rescoreExplain.getValue() * secondaryWeight, "product of:", rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight")); - QueryRescoreMode scoreMode = rescore.scoreMode(); - return Explanation.match( + QueryRescoreMode scoreMode = rescore.scoreMode(); + return Explanation.match( scoreMode.combine(prim.getValue(), sec.getValue()), scoreMode + " of:", prim, sec); - } else { - return prim; + } } + return prim; } private static final Comparator<ScoreDoc> SCORE_DOC_COMPARATOR = new Comparator<ScoreDoc>() { diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java index 75ce807a67f47..d070b5abd3f01 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java @@ -19,6 +19,8 @@ package org.elasticsearch.search.rescore; +import java.util.Set; + /** * Context available to the rescore while it is running. Rescore * implementations should extend this with any additional resources that * @@ -27,6 +29,7 @@ public class RescoreContext { private final int windowSize; private final Rescorer rescorer; + private Set<Integer> recroredDocs; //doc Ids for which rescoring was applied /** * Build the context. @@ -50,4 +53,12 @@ public Rescorer rescorer() { public int getWindowSize() { return windowSize; } + + public void setRescoredDocs(Set<Integer> docIds) { + recroredDocs = docIds; + } + + public boolean isRescored(int docId) { + return recroredDocs.contains(docId); + } } From 35fa9349717ae670ab8ff572493a08a4b484266f Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Thu, 17 May 2018 14:10:49 +0300 Subject: [PATCH 15/25] Adjust fast forward for token expiration test (#30668) Adjusts the maximum fast forward time for token expiration tests to be 5 seconds before actual token expiration, so that the test won't fail even when the upper limit is randomly selected. Resolves: #30062 --- .../elasticsearch/xpack/security/authc/TokenServiceTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
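The flakiness here comes from the random bound itself: the test fast-forwards a fake clock by a random number of seconds and then asserts the token is still valid, so drawing the full expiration window lands the clock exactly on expiry. A tiny illustration of the bound change (plain Java; the 20-minute figure is an assumed default for the example, the real value comes from TokenService.TOKEN_EXPIRATION):

    import java.util.Random;

    public class FastForwardBound {
        public static void main(String[] args) {
            final int expirySeconds = 20 * 60; // assumed default; illustration only
            final Random random = new Random();

            // Old bound: 1..expirySeconds inclusive. Drawing the upper limit fast-forwards
            // the clock exactly to the expiration instant, and the "still valid" assertion fails.
            int oldDraw = 1 + random.nextInt(expirySeconds);
            // New bound: 1..expirySeconds-5, leaving a safety margin before expiry.
            int newDraw = 1 + random.nextInt(expirySeconds - 5);

            System.out.println("old draw may reach " + expirySeconds + " (got " + oldDraw + "), "
                    + "new draw tops out at " + (expirySeconds - 5) + " (got " + newDraw + ")");
        }
    }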
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index af1034d957455..28cf4bf95c924 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -450,7 +450,7 @@ public void testTokenExpiry() throws Exception { } final TimeValue defaultExpiration = TokenService.TOKEN_EXPIRATION.get(Settings.EMPTY); - final int fastForwardAmount = randomIntBetween(1, Math.toIntExact(defaultExpiration.getSeconds())); + final int fastForwardAmount = randomIntBetween(1, Math.toIntExact(defaultExpiration.getSeconds()) - 5); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // move the clock forward but don't go to expiry clock.fastForwardSeconds(fastForwardAmount); From 9f824c4aa83c25d89cc80e05eecfa742b8c47c9e Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Thu, 17 May 2018 21:36:13 +1000 Subject: [PATCH 16/25] Add detailed assert message to IndexAuditUpgradeIT (#30669) Print out the returned buckets if the size does not match the expectation. --- .../java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java index ffdf0a39a909a..1f76e670854a2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -71,6 +71,6 @@ private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { assertNotNull(nodesAgg); List<Map<String, Object>> buckets = (List<Map<String, Object>>) nodesAgg.get("buckets"); assertNotNull(buckets); - assertEquals(numBuckets, buckets.size()); + assertEquals("Found node buckets " + buckets, numBuckets, buckets.size()); } } From 399489b24263172cbbe9ea33dcf5826d8d420642 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 17 May 2018 13:55:36 +0200 Subject: [PATCH 17/25] Remove bogus file accidentally added --- sdfhksldj | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 sdfhksldj diff --git a/sdfhksldj b/sdfhksldj deleted file mode 100644 index e69de29bb2d1d..0000000000000 From b57d21bab177820414880bf4291dd93b5b120cbd Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 17 May 2018 13:58:10 +0200 Subject: [PATCH 18/25] Use proper write-once semantics for GCS repository (#30438) There's no need for an extra blobExists() call when writing a blob to the GCS service. GCS's insert method provides an option (with stronger consistency guarantees) that makes the upload fail if the blob already exists, so a separate existence check is redundant.
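The shape of the change, as a standalone sketch (plain Java; the ConcurrentMap plays the blob store, and PreconditionFailed stands in for the StorageException with HTTP code 412 that GCS returns when a doesNotExist precondition is violated):

    import java.nio.file.FileAlreadyExistsException;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class WriteOnceSketch {

        // Stand-in for com.google.cloud.storage.StorageException with code 412.
        static class PreconditionFailed extends RuntimeException {
            PreconditionFailed(String message) { super(message); }
        }

        private final ConcurrentMap<String, byte[]> blobs = new ConcurrentHashMap<>();

        // The server-side behaviour of create(blobInfo, content, BlobTargetOption.doesNotExist()):
        // one atomic "create only if absent" instead of a racy exists()-then-write pair.
        private void createIfAbsent(String name, byte[] content) {
            if (blobs.putIfAbsent(name, content) != null) {
                throw new PreconditionFailed("blob already exists");
            }
        }

        // The repository-side translation: surface the 412 as a FileAlreadyExistsException.
        public void writeBlob(String name, byte[] content) throws FileAlreadyExistsException {
            try {
                createIfAbsent(name, content);
            } catch (PreconditionFailed e) {
                throw new FileAlreadyExistsException(name, null, e.getMessage());
            }
        }

        public static void main(String[] args) throws Exception {
            WriteOnceSketch store = new WriteOnceSketch();
            store.writeBlob("snap-1.dat", new byte[] {1});
            try {
                store.writeBlob("snap-1.dat", new byte[] {2}); // second write must fail
            } catch (FileAlreadyExistsException expected) {
                System.out.println("rejected overwrite of " + expected.getFile());
            }
        }
    }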
Relates to #19749 --- .../gcs/GoogleCloudStorageBlobContainer.java | 3 - .../gcs/GoogleCloudStorageBlobStore.java | 58 +++++++++++++------ .../repositories/gcs/MockStorage.java | 21 ++++++- 3 files changed, 58 insertions(+), 24 deletions(-) diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 331e2dadca2da..833539905103a 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -66,9 +66,6 @@ public InputStream readBlob(String blobName) throws IOException { @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - if (blobExists(blobName)) { - throw new FileAlreadyExistsException("blob [" + blobName + "] already exists, cannot overwrite"); - } blobStore.writeBlob(buildKey(blobName), inputStream, blobSize); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 5dc03ea45de03..83aafdde2b1ab 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -28,6 +28,7 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobListOption; import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.StorageException; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -47,12 +48,15 @@ import java.nio.channels.Channels; import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; +import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; + class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore { // The recommended maximum size of a blob that should be uploaded in a single @@ -204,24 +208,32 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I * @param inputStream the stream containing the blob data */ private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { - final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException(() -> storage.writer(blobInfo)); - Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { - @Override - public boolean isOpen() { - return writeChannel.isOpen(); - } + try { + final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException( + () -> storage.writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); + Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { + @Override + public boolean isOpen() { + return writeChannel.isOpen(); + } - @Override - public void close() throws IOException { - SocketAccess.doPrivilegedVoidIOException(writeChannel::close); - } + 
@Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); + } - @SuppressForbidden(reason = "Channel is based of a socket not a file") - @Override - public int write(ByteBuffer src) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + @SuppressForbidden(reason = "Channel is based of a socket not a file") + @Override + public int write(ByteBuffer src) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + } + })); + } catch (StorageException se) { + if (se.getCode() == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } - })); + throw se; + } } /** @@ -238,7 +250,17 @@ private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); Streams.copy(inputStream, baos); - SocketAccess.doPrivilegedVoidIOException(() -> storage.create(blobInfo, baos.toByteArray())); + SocketAccess.doPrivilegedVoidIOException( + () -> { + try { + storage.create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist()); + } catch (StorageException se) { + if (se.getCode() == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); + } + throw se; + } + }); } /** @@ -295,8 +317,8 @@ void deleteBlobs(Collection blobNames) throws IOException { /** * Moves a blob within the same bucket * - * @param sourceBlob name of the blob to move - * @param targetBlob new name of the blob in the same bucket + * @param sourceBlobName name of the blob to move + * @param targetBlobName new name of the blob in the same bucket */ void moveBlob(String sourceBlobName, String targetBlobName) throws IOException { final BlobId sourceBlobId = BlobId.of(bucket, sourceBlobName); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index 2b52b7a32a9cc..1b31b3018e48a 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -56,6 +56,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * {@link MockStorage} mocks a {@link Storage} client by storing all the blobs @@ -113,7 +114,14 @@ public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... 
option if (bucketName.equals(blobInfo.getBucket()) == false) { throw new StorageException(404, "Bucket not found"); } - blobs.put(blobInfo.getName(), content); + if (Stream.of(options).anyMatch(option -> option.equals(BlobTargetOption.doesNotExist()))) { + byte[] existingBytes = blobs.putIfAbsent(blobInfo.getName(), content); + if (existingBytes != null) { + throw new StorageException(412, "Blob already exists"); + } + } else { + blobs.put(blobInfo.getName(), content); + } return get(BlobId.of(blobInfo.getBucket(), blobInfo.getName())); } @@ -243,9 +251,16 @@ public boolean isOpen() { } @Override - public void close() throws IOException { + public void close() { IOUtils.closeWhileHandlingException(writableByteChannel); - blobs.put(blobInfo.getName(), output.toByteArray()); + if (Stream.of(options).anyMatch(option -> option.equals(BlobWriteOption.doesNotExist()))) { + byte[] existingBytes = blobs.putIfAbsent(blobInfo.getName(), output.toByteArray()); + if (existingBytes != null) { + throw new StorageException(412, "Blob already exists"); + } + } else { + blobs.put(blobInfo.getName(), output.toByteArray()); + } } }; } From ef0daee850f4c68ac5a9f27f8140674f95ae923a Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 17 May 2018 12:59:20 +0100 Subject: [PATCH 19/25] [TEST] Account for increase in ML C++ memory usage (#30675) Recent changes to the ML C++ have resulted in higher memory usage, so fewer "by" fields can be analyzed in a given amount of model memory. --- .../xpack/ml/integration/AutodetectMemoryLimitIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index c46b1d1c8689b..f54f1bf54e932 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -134,7 +134,7 @@ public void testTooManyByFields() throws Exception { assertThat(modelSizeStats.getModelBytes(), lessThan(36000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(30000000L)); assertThat(modelSizeStats.getTotalByFieldCount(), lessThan(1900L)); - assertThat(modelSizeStats.getTotalByFieldCount(), greaterThan(1600L)); + assertThat(modelSizeStats.getTotalByFieldCount(), greaterThan(1500L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } From 712473b5582fd723d904aba3374f54763e8610a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 17 May 2018 14:23:08 +0200 Subject: [PATCH 20/25] [Docs] Replace InetSocketTransportAddress with TransportAddress (#30673) The former class has been removed in 6.0; the documentation code snippets should be updated accordingly.
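The corrected usage in one self-contained snippet (a sketch that assumes the 6.x transport client dependency on the classpath and a node listening on localhost:9300):

    import org.elasticsearch.client.transport.TransportClient;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.transport.TransportAddress;
    import org.elasticsearch.transport.client.PreBuiltTransportClient;

    import java.net.InetAddress;

    public class TransportAddressExample {
        public static void main(String[] args) throws Exception {
            Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build();
            try (TransportClient client = new PreBuiltTransportClient(settings)) {
                // InetSocketTransportAddress was removed in 6.0; TransportAddress takes its place.
                client.addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300));
            }
        }
    }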
--- docs/java-api/query-dsl/has-parent-query.asciidoc | 2 +- docs/java-api/query-dsl/percolate-query.asciidoc | 2 +- x-pack/docs/en/watcher/java.asciidoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/java-api/query-dsl/has-parent-query.asciidoc b/docs/java-api/query-dsl/has-parent-query.asciidoc index 63711c399f71a..6a83fe2b0698f 100644 --- a/docs/java-api/query-dsl/has-parent-query.asciidoc +++ b/docs/java-api/query-dsl/has-parent-query.asciidoc @@ -9,7 +9,7 @@ When using the `has_parent` query it is important to use the `PreBuiltTransportC -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Otherwise the parent-join module doesn't get loaded and the `has_parent` query can't be used from the transport client. diff --git a/docs/java-api/query-dsl/percolate-query.asciidoc b/docs/java-api/query-dsl/percolate-query.asciidoc index a5651392b628e..e1968ae456a5c 100644 --- a/docs/java-api/query-dsl/percolate-query.asciidoc +++ b/docs/java-api/query-dsl/percolate-query.asciidoc @@ -9,7 +9,7 @@ See: -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Before the `percolate` query can be used an `percolator` mapping should be added and diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc index f54e8554e8e88..bcf41252433ba 100644 --- a/x-pack/docs/en/watcher/java.asciidoc +++ b/x-pack/docs/en/watcher/java.asciidoc @@ -95,7 +95,7 @@ TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() .put("cluster.name", "myClusterName") ... .build()) - .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300)); + .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)); XPackClient xpackClient = new XPackClient(client); WatcherClient watcherClient = xpackClient.watcher(); From cefbd29db3adf2c7e60a11f842f2bc6fea234f15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Pr=C4=85dzy=C5=84ski?= Date: Thu, 17 May 2018 16:21:25 +0200 Subject: [PATCH 21/25] top_hits doc example description update (#30676) Example description does not fit example code. 
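The corrected example, sketched with the Java API builders for comparison (the aggregation names "types" and "latest_sale" are illustrative; printing the builder shows the JSON request body the asciidoc snippet documents):

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;
    import org.elasticsearch.search.sort.SortOrder;

    public class TopHitsExample {
        public static void main(String[] args) {
            // Group sales by type; per type keep only the latest sale, sourcing just date and price.
            SearchSourceBuilder source = new SearchSourceBuilder()
                .query(QueryBuilders.matchAllQuery())
                .aggregation(AggregationBuilders.terms("types").field("type")
                    .subAggregation(AggregationBuilders.topHits("latest_sale")
                        .size(1)
                        .sort("date", SortOrder.DESC)
                        .fetchSource(new String[] { "date", "price" }, null)));
            System.out.println(source); // prints the JSON request body
        }
    }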
--- .../aggregations/metrics/tophits-aggregation.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index 0c19bf172bbf0..dc3222a5f371e 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -28,8 +28,8 @@ The top_hits aggregation returns regular search hits, because of this many per h ==== Example -In the following example we group the questions by tag and per tag we show the last active question. For each question -only the title field is being included in the source. +In the following example we group the sales by type and per type we show the last sale. +For each sale only the date and price fields are being included in the source. [source,js] -------------------------------------------------- From a0a8c4f1861ead003bcc8d41d6c38200dc250135 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Pr=C4=85dzy=C5=84ski?= Date: Thu, 17 May 2018 16:21:50 +0200 Subject: [PATCH 22/25] filters agg docs duplicated 'bucket' word removal (#30677) In one place the word 'bucket' was duplicated. --- docs/reference/aggregations/bucket/filters-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc index 3ca86d1d7a096..b7e3b1edf10d2 100644 --- a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc @@ -124,7 +124,7 @@ The `other_bucket` parameter can be set to add a bucket to the response which wi not match any of the given filters. The value of this parameter can be as follows: `false`:: Does not compute the `other` bucket -`true`:: Returns the `other` bucket bucket either in a bucket (named `_other_` by default) if named filters are being used, +`true`:: Returns the `other` bucket either in a bucket (named `_other_` by default) if named filters are being used, or as the last bucket if anonymous filters are being used The `other_bucket_key` parameter can be used to set the key for the `other` bucket to a value other than the default `_other_`. Setting From 7c2fc260119a100470c35765acf32e34eef64cd0 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Thu, 17 May 2018 10:34:42 -0400 Subject: [PATCH 23/25] Correct typos Relates to #28725 --- .../org/elasticsearch/search/rescore/RescoreContext.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java index d070b5abd3f01..2401b9ff32900 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java @@ -29,7 +29,7 @@ public class RescoreContext { private final int windowSize; private final Rescorer rescorer; - private Set<Integer> recroredDocs; //doc Ids for which rescoring was applied + private Set<Integer> rescoredDocs; //doc Ids for which rescoring was applied /** * Build the context. 
@@ -55,10 +55,10 @@ public int getWindowSize() { } public void setRescoredDocs(Set<Integer> docIds) { - recroredDocs = docIds; + rescoredDocs = docIds; } public boolean isRescored(int docId) { - return recroredDocs.contains(docId); + return rescoredDocs.contains(docId); } } From 75665a2d3e0f73734799b52ed1a223bdcef0e13c Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Thu, 17 May 2018 17:51:26 +0300 Subject: [PATCH 24/25] [ML] Clean left behind model state docs (#30659) It is possible for state documents to be left behind in the state index. This may be because of bugs or uncontrollable scenarios. In any case, those documents may take up quite some disk space when they add up. This commit adds a step in the expired data deletion that is part of the daily maintenance service. The new step searches for state documents that do not belong to any of the current jobs and deletes them. Closes #30551 --- .../autodetect/state/CategorizerState.java | 10 ++ .../process/autodetect/state/ModelState.java | 10 ++ .../process/autodetect/state/Quantiles.java | 10 ++ .../state/CategorizerStateTests.java | 29 ++++ .../autodetect/state/ModelStateTests.java | 31 ++++ .../autodetect/state/QuantilesTests.java | 17 +++ .../TransportDeleteExpiredDataAction.java | 4 +- .../persistence/BatchedDocumentsIterator.java | 9 ++ .../BatchedStateDocIdsIterator.java | 36 +++++ .../ml/job/retention/UnusedStateRemover.java | 134 ++++++++++++++++++ .../ml/integration/DeleteExpiredDataIT.java | 36 ++++- 11 files changed, 324 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java index 8c08300354698..2d68a6d7cf7a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java @@ -37,6 +37,16 @@ public static final String v54DocumentPrefix(String jobId) { return jobId + "#"; } + /** + * Given the id of a categorizer state document it extracts the job id + * @param docId the categorizer state document id + * @return the job id or {@code null} if the id is not valid + */ + public static final String extractJobId(String docId) { + int suffixIndex = docId.lastIndexOf("_" + TYPE); + return suffixIndex <= 0 ? 
null : docId.substring(0, suffixIndex); + } + private CategorizerState() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java index dce791a2b3d26..fbec7bb6c7291 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java @@ -29,6 +29,16 @@ public static final String v54DocumentId(String jobId, String snapshotId, int do return jobId + "-" + snapshotId + "#" + docNum; } + /** + * Given the id of a state document it extracts the job id + * @param docId the state document id + * @return the job id or {@code null} if the id is not valid + */ + public static final String extractJobId(String docId) { + int suffixIndex = docId.lastIndexOf("_" + TYPE + "_"); + return suffixIndex <= 0 ? null : docId.substring(0, suffixIndex); + } + private ModelState() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java index 0c167aadb7623..0b3ddcc7b5197 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java @@ -60,6 +60,16 @@ public static String v54DocumentId(String jobId) { return jobId + "-" + TYPE; } + /** + * Given the id of a quantiles document it extracts the job id + * @param docId the quantiles document id + * @return the job id or {@code null} if the id is not valid + */ + public static final String extractJobId(String docId) { + int suffixIndex = docId.lastIndexOf("_" + TYPE); + return suffixIndex <= 0 ? null : docId.substring(0, suffixIndex); + } + private final String jobId; private final Date timestamp; private final String quantileState; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java new file mode 100644 index 0000000000000..726288faffbc7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class CategorizerStateTests extends ESTestCase { + + public void testExtractJobId_GivenValidDocId() { + assertThat(CategorizerState.extractJobId("foo_categorizer_state#1"), equalTo("foo")); + assertThat(CategorizerState.extractJobId("bar_categorizer_state#2"), equalTo("bar")); + assertThat(CategorizerState.extractJobId("foo_bar_categorizer_state#3"), equalTo("foo_bar")); + assertThat(CategorizerState.extractJobId("_categorizer_state_categorizer_state#3"), equalTo("_categorizer_state")); + } + + public void testExtractJobId_GivenInvalidDocId() { + assertThat(CategorizerState.extractJobId(""), is(nullValue())); + assertThat(CategorizerState.extractJobId("foo"), is(nullValue())); + assertThat(CategorizerState.extractJobId("_categorizer_state"), is(nullValue())); + assertThat(CategorizerState.extractJobId("foo_model_state_3141341341"), is(nullValue())); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java new file mode 100644 index 0000000000000..0e42a06111931 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class ModelStateTests extends ESTestCase { + + public void testExtractJobId_GivenValidDocId() { + assertThat(ModelState.extractJobId("foo_model_state_3151373783#1"), equalTo("foo")); + assertThat(ModelState.extractJobId("bar_model_state_451515#3"), equalTo("bar")); + assertThat(ModelState.extractJobId("foo_bar_model_state_blah_blah"), equalTo("foo_bar")); + assertThat(ModelState.extractJobId("_model_state_model_state_11111"), equalTo("_model_state")); + } + + public void testExtractJobId_GivenInvalidDocId() { + assertThat(ModelState.extractJobId(""), is(nullValue())); + assertThat(ModelState.extractJobId("foo"), is(nullValue())); + assertThat(ModelState.extractJobId("_model_3141341341"), is(nullValue())); + assertThat(ModelState.extractJobId("_state_3141341341"), is(nullValue())); + assertThat(ModelState.extractJobId("_model_state_3141341341"), is(nullValue())); + assertThat(ModelState.extractJobId("foo_quantiles"), is(nullValue())); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java index 84c1a161f1ee4..146e3ed5bd539 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java @@ -15,9 +15,26 @@ import java.util.Date; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; public class QuantilesTests extends AbstractSerializingTestCase { + public void testExtractJobId_GivenValidDocId() { + assertThat(Quantiles.extractJobId("foo_quantiles"), equalTo("foo")); + assertThat(Quantiles.extractJobId("bar_quantiles"), equalTo("bar")); + assertThat(Quantiles.extractJobId("foo_bar_quantiles"), equalTo("foo_bar")); + assertThat(Quantiles.extractJobId("_quantiles_quantiles"), equalTo("_quantiles")); + } + + public void testExtractJobId_GivenInvalidDocId() { + assertThat(Quantiles.extractJobId(""), is(nullValue())); + assertThat(Quantiles.extractJobId("foo"), is(nullValue())); + assertThat(Quantiles.extractJobId("_quantiles"), is(nullValue())); + assertThat(Quantiles.extractJobId("foo_model_state_3141341341"), is(nullValue())); + } + public void testEquals_GivenSameObject() { Quantiles quantiles = new Quantiles("foo", new Date(0L), "foo"); assertTrue(quantiles.equals(quantiles)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index 0e1ca9dd9aec3..9ab2132b61912 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.ml.job.retention.ExpiredModelSnapshotsRemover; import 
org.elasticsearch.xpack.ml.job.retention.ExpiredResultsRemover; import org.elasticsearch.xpack.ml.job.retention.MlDataRemover; +import org.elasticsearch.xpack.ml.job.retention.UnusedStateRemover; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; @@ -56,7 +57,8 @@ private void deleteExpiredData(ActionListener List dataRemovers = Arrays.asList( new ExpiredResultsRemover(client, clusterService, auditor), new ExpiredForecastsRemover(client), - new ExpiredModelSnapshotsRemover(client, clusterService) + new ExpiredModelSnapshotsRemover(client, clusterService), + new UnusedStateRemover(client, clusterService) ); Iterator dataRemoversIterator = new VolatileCursorIterator<>(dataRemovers); deleteExpiredData(dataRemoversIterator, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java index cf50579a0e517..d50a7c3f8c2ad 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java @@ -97,6 +97,7 @@ private SearchResponse initScroll() { searchRequest.source(new SearchSourceBuilder() .size(BATCH_SIZE) .query(getQuery()) + .fetchSource(shouldFetchSource()) .sort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))); SearchResponse searchResponse = client.search(searchRequest).actionGet(); @@ -123,6 +124,14 @@ private Deque mapHits(SearchResponse searchResponse) { return results; } + /** + * Should fetch source? Defaults to {@code true} + * @return whether the source should be fetched + */ + protected boolean shouldFetchSource() { + return true; + } + /** * Get the query to use for the search * @return the search query diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java new file mode 100644 index 0000000000000..92235570b47b5 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.job.persistence; + +import org.elasticsearch.client.Client; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; + +/** + * Iterates through the state doc ids + */ +public class BatchedStateDocIdsIterator extends BatchedDocumentsIterator<String> { + + public BatchedStateDocIdsIterator(Client client, String index) { + super(client, index); + } + + @Override + protected boolean shouldFetchSource() { + return false; + } + + @Override + protected QueryBuilder getQuery() { + return QueryBuilders.matchAllQuery(); + } + + @Override + protected String map(SearchHit hit) { + return hit.getId(); + } +}
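Before the remover itself, a compact sketch of the scan it performs: walk the state-document ids, recover the owning job id using the three naming conventions added above (model state, quantiles, categorizer state), and collect documents whose job no longer exists. Plain runnable Java, assuming ids follow exactly those formats; the real class below works against the .ml-state index rather than in-memory collections:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.function.Function;

    public class UnusedStateScanSketch {

        // One extractor per id shape; mirrors ModelState/Quantiles/CategorizerState.extractJobId.
        static String extract(String docId, String suffix) {
            int suffixIndex = docId.lastIndexOf(suffix);
            return suffixIndex <= 0 ? null : docId.substring(0, suffixIndex);
        }

        static final List<Function<String, String>> EXTRACTORS = Arrays.asList(
                id -> extract(id, "_model_state_"),       // e.g. foo_model_state_1234567#1 -> foo
                id -> extract(id, "_quantiles"),          // e.g. foo_quantiles -> foo
                id -> extract(id, "_categorizer_state")); // e.g. foo_categorizer_state#1 -> foo

        static String jobIdOf(String docId) {
            for (Function<String, String> extractor : EXTRACTORS) {
                String jobId = extractor.apply(docId);
                if (jobId != null) {
                    return jobId;
                }
            }
            return null; // not a managed state document id
        }

        public static void main(String[] args) {
            Set<String> liveJobs = new HashSet<>(Arrays.asList("foo"));
            List<String> stateDocIds = Arrays.asList("foo_quantiles", "gone_model_state_42#1", "unrelated-doc");
            for (String docId : stateDocIds) {
                String jobId = jobIdOf(docId);
                if (jobId != null && liveJobs.contains(jobId) == false) {
                    System.out.println("would delete " + docId); // only gone_model_state_42#1
                }
            }
        }
    }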
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java new file mode 100644 index 0000000000000..b07b025e09e56 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.retention; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.core.ml.MLMetadataField; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.ml.job.persistence.BatchedStateDocIdsIterator; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Deque; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; + +/** + * If for any reason a job is deleted but some of its state documents + * are left behind, this class deletes any unused documents stored + * in the .ml-state index. + */ +public class UnusedStateRemover implements MlDataRemover { + + private static final Logger LOGGER = Loggers.getLogger(UnusedStateRemover.class); + + private final Client client; + private final ClusterService clusterService; + + public UnusedStateRemover(Client client, ClusterService clusterService) { + this.client = Objects.requireNonNull(client); + this.clusterService = Objects.requireNonNull(clusterService); + } + + @Override + public void remove(ActionListener<Boolean> listener) { + try { + BulkRequestBuilder deleteUnusedStateRequestBuilder = findUnusedStateDocs(); + if (deleteUnusedStateRequestBuilder.numberOfActions() > 0) { + executeDeleteUnusedStateDocs(deleteUnusedStateRequestBuilder, listener); + } else { + listener.onResponse(true); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + private BulkRequestBuilder findUnusedStateDocs() { + Set<String> jobIds = getJobIds(); + BulkRequestBuilder deleteUnusedStateRequestBuilder = client.prepareBulk(); + BatchedStateDocIdsIterator stateDocIdsIterator = new BatchedStateDocIdsIterator(client, AnomalyDetectorsIndex.jobStateIndexName()); + while (stateDocIdsIterator.hasNext()) { + Deque<String> stateDocIds = stateDocIdsIterator.next(); + for (String stateDocId : stateDocIds) { + String jobId = JobIdExtractor.extractJobId(stateDocId); + if (jobId == null) { + // not a managed state document id + continue; + } + if (jobIds.contains(jobId) == false) { + deleteUnusedStateRequestBuilder.add(new DeleteRequest( + AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, stateDocId)); + } + } + } + return deleteUnusedStateRequestBuilder; + } + + private Set<String> getJobIds() { + ClusterState clusterState = clusterService.state(); + MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + if (mlMetadata != null) { + return mlMetadata.getJobs().keySet(); + } + return Collections.emptySet(); + } + + private void executeDeleteUnusedStateDocs(BulkRequestBuilder deleteUnusedStateRequestBuilder, ActionListener<Boolean> listener) { + LOGGER.info("Found [{}] unused state documents; attempting to delete", + deleteUnusedStateRequestBuilder.numberOfActions()); + deleteUnusedStateRequestBuilder.execute(new ActionListener<BulkResponse>() { + @Override + public void onResponse(BulkResponse bulkItemResponses) { + if (bulkItemResponses.hasFailures()) { + LOGGER.error("Some unused state documents could not be deleted due to failures: {}", + bulkItemResponses.buildFailureMessage()); + } else { + LOGGER.info("Successfully deleted all unused state documents"); + } + listener.onResponse(true); + } + + @Override + public void onFailure(Exception e) { + LOGGER.error("Error deleting unused model state documents: ", e); + listener.onFailure(e); + } + }); + } + + private static class JobIdExtractor { + + private static List<Function<String, String>> extractors = Arrays.asList( + ModelState::extractJobId, Quantiles::extractJobId, CategorizerState::extractJobId); + + private static String extractJobId(String docId) { + String jobId; + for (Function<String, String> extractor : extractors) { + jobId = extractor.apply(docId); + if (jobId != null) { + return jobId; + } + } + return null; + } + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index 3a1fc2b0f6d4a..23bd5c7f7ddf1 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ 
b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -8,12 +8,15 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -21,6 +24,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; @@ -31,13 +35,16 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class DeleteExpiredDataIT extends MlNativeAutodetectIntegTestCase { @@ -78,11 +85,16 @@ public void setUpData() throws IOException { } @After - public void tearDownData() throws Exception { + public void tearDownData() { client().admin().indices().prepareDelete(DATA_INDEX).get(); cleanUp(); } + public void testDeleteExpiredDataGivenNothingToDelete() throws Exception { + // Tests that nothing goes wrong when there's nothing to delete + client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); + } + public void testDeleteExpiredData() throws Exception { registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(null)); registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(null)); @@ -166,6 +178,18 @@ public void testDeleteExpiredData() throws Exception { assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount())); } + // Index some unused state documents (more than 10K to test scrolling works) + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10010; i++) { + String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexName(), "doc", docId); + indexRequest.source(Collections.emptyMap()); 
+ bulkRequestBuilder.add(indexRequest); + } + assertThat(bulkRequestBuilder.get().status(), equalTo(RestStatus.OK)); + + // Now call the action under test client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); // We need to refresh to ensure the deletion is visible @@ -216,6 +240,16 @@ public void testDeleteExpiredData() throws Exception { assertThat(countForecastDocs(job.getId(), forecastId), equalTo(0L)); } } + + // Verify .ml-state doesn't contain unused state documents + SearchResponse stateDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexName()) + .setFetchSource(false) + .setSize(10000) + .get(); + assertThat(stateDocsResponse.getHits().getTotalHits(), lessThan(10000L)); + for (SearchHit hit : stateDocsResponse.getHits().getHits()) { + assertThat(hit.getId().startsWith("non_existing_job"), is(false)); + } } private static Job.Builder newJobBuilder(String id) { From 663295d635d55f70d0e232221da96973c674bd37 Mon Sep 17 00:00:00 2001 From: lcawl Date: Thu, 17 May 2018 09:57:11 -0700 Subject: [PATCH 25/25] [DOCS] Replace X-Pack terms with attributes --- docs/reference/setup/install/windows.asciidoc | 22 +++++++++---------- .../authentication/built-in-users.asciidoc | 2 +- .../authentication/custom-realm.asciidoc | 2 +- x-pack/docs/en/security/gs-index.asciidoc | 6 ++--- x-pack/docs/en/security/index.asciidoc | 6 ++--- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index c48ec5de22d2c..861daa160e7b5 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -79,13 +79,13 @@ part of the installation, with the option to configure a HTTPS proxy through whi [[msi-installer-selected-plugins]] image::images/msi_installer/msi_installer_selected_plugins.png[] -Upon choosing to install X-Pack plugin, an additional step allows a choice of the type of X-Pack -license to install, in addition to X-Pack Security configuration and built-in user configuration: +Upon choosing to install {xpack} plugin, an additional step allows a choice of the type of {xpack} +license to install, in addition to {security} configuration and built-in user configuration: [[msi-installer-xpack]] image::images/msi_installer/msi_installer_xpack.png[] -NOTE: X-Pack includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the +NOTE: {xpack} includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. @@ -286,43 +286,43 @@ as _properties_ within Windows Installer documentation) that can be passed to ms `XPACKLICENSE`:: - When installing X-Pack plugin, the type of license to install, + When installing {xpack} plugin, the type of license to install, either `Basic` or `Trial`. Defaults to `Basic` `XPACKSECURITYENABLED`:: - When installing X-Pack plugin with a `Trial` license, whether X-Pack Security should be enabled. + When installing {xpack} plugin with a `Trial` license, whether {security} should be enabled. 
    Defaults to `true`
 
 `BOOTSTRAPPASSWORD`::
 
-    When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password to
+    When installing {xpack} plugin with a `Trial` license and {security} enabled, the password
     used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore.
     Defaults to a randomized value.
 
 `SKIPSETTINGPASSWORDS`::
 
-    When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, whether the
+    When installing {xpack} plugin with a `Trial` license and {security} enabled, whether the
     installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`.
     Defaults to `false`
 
 `ELASTICUSERPASSWORD`::
 
-    When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
+    When installing {xpack} plugin with a `Trial` license and {security} enabled, the password
     to use for the built-in user `elastic`. Defaults to `""`
 
 `KIBANAUSERPASSWORD`::
 
-    When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
+    When installing {xpack} plugin with a `Trial` license and {security} enabled, the password
     to use for the built-in user `kibana`. Defaults to `""`
 
 `LOGSTASHSYSTEMUSERPASSWORD`::
 
-    When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password
+    When installing {xpack} plugin with a `Trial` license and {security} enabled, the password
     to use for the built-in user `logstash_system`. Defaults to `""`
 
 To pass a value, simply append the property name and value using the format `<PROPERTYNAME>="<VALUE>"` to
-the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[X-Pack]:
+the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[{xpack}]:
 
 ["source","sh",subs="attributes,callouts"]
 --------------------------------------------
diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc
index 74fc9f1e1db12..d18f441e293f1 100644
--- a/x-pack/docs/en/security/authentication/built-in-users.asciidoc
+++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc
@@ -118,7 +118,7 @@ the `logstash.yml` configuration file:
 xpack.monitoring.elasticsearch.password: logstashpassword
 ----------------------------------------------------------
 
-If you have upgraded from an older version of elasticsearch/x-pack,
+If you have upgraded from an older version of Elasticsearch,
 the `logstash_system` user may have defaulted to _disabled_ for security reasons.
 Once the password has been changed, you can enable the user via the following API call:
diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc
index 8e0114b7454c6..0ae33d434a1f5 100644
--- a/x-pack/docs/en/security/authentication/custom-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc
@@ -50,7 +50,7 @@ public AuthenticationFailureHandler getAuthenticationFailureHandler() {
 ----------------------------------------------------
 +
 The `getAuthenticationFailureHandler` method is used to optionally provide a
-custom `AuthenticationFailureHandler`, which will control how X-Pack responds
+custom `AuthenticationFailureHandler`, which will control how {security} responds
 in certain authentication failure events.
 +
 [source,java]
diff --git a/x-pack/docs/en/security/gs-index.asciidoc b/x-pack/docs/en/security/gs-index.asciidoc
index b320b2a6d82e5..bf211c3692908 100644
--- a/x-pack/docs/en/security/gs-index.asciidoc
+++ b/x-pack/docs/en/security/gs-index.asciidoc
@@ -25,8 +25,8 @@ Security protects Elasticsearch clusters by:
 To prevent unauthorized access to your Elasticsearch cluster, you must have a way to _authenticate_ users. This simply means that you need a way to validate that a user is who they claim to be. For example, you have to make sure only
-the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack
-Security provides a standalone authentication mechanism that enables you to
+the person named _Kelsey Andorra_ can sign in as the user `kandorra`. {security}
+provides a standalone authentication mechanism that enables you to
 quickly password-protect your cluster. If you're already using {xpack-ref}/ldap-realm.html[LDAP], {xpack-ref}/active-directory-realm.html[ Active Directory], or {xpack-ref}/pki-realm.html[ PKI] to manage users in your organization, {security} is able to integrate with those
@@ -83,7 +83,7 @@ issues.
 
 * {xpack-ref}/ccs-clients-integrations.html[Integrations]
   shows you how to interact with an Elasticsearch cluster protected by
-  X-Pack Security.
+  {security}.
 
 * {xpack-ref}/security-reference.html[Reference]
   provides detailed information about the access privileges you can grant to
diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc
index d5f970a3fb826..ac055d535c5db 100644
--- a/x-pack/docs/en/security/index.asciidoc
+++ b/x-pack/docs/en/security/index.asciidoc
@@ -26,8 +26,8 @@ Security protects Elasticsearch clusters by:
 To prevent unauthorized access to your Elasticsearch cluster, you must have a way to _authenticate_ users. This simply means that you need a way to validate that a user is who they claim to be. For example, you have to make sure only
-the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack
-Security provides a standalone authentication mechanism that enables you to
+the person named _Kelsey Andorra_ can sign in as the user `kandorra`. {security}
+provides a standalone authentication mechanism that enables you to
 quickly password-protect your cluster. If you're already using <>, <>, or <> to manage users in your organization, {security} is able to integrate with those
@@ -81,7 +81,7 @@ issues.
 
 * <> shows you how to interact with an Elasticsearch cluster protected by
-  X-Pack Security.
+  {security}.
 
 * <> provides detailed information about the access privileges you can grant to