Commit

Merge remote-tracking branch 'origin/main' into migrate-kibana-reporting-to-data-stream
dakrone committed Jul 24, 2023
2 parents fe51469 + 72aef7e commit b009b8a
Showing 454 changed files with 6,026 additions and 2,458 deletions.
@@ -35,18 +35,22 @@ class SnykDependencyMonitoringGradlePluginFuncTest extends AbstractGradleInterna
version = "$version"
repositories {
mavenCentral()
}
dependencies {
implementation 'org.apache.lucene:lucene-monitor:9.2.0'
}
+ tasks.named('generateSnykDependencyGraph').configure {
+     remoteUrl = "http://acme.org"
+ }
"""
when:
def build = gradleRunner("generateSnykDependencyGraph").build()
then:
build.task(":generateSnykDependencyGraph").outcome == TaskOutcome.SUCCESS
- JSONAssert.assertEquals(file("build/snyk/dependencies.json").text, """{
+ JSONAssert.assertEquals("""{
"meta": {
"method": "custom gradle",
"id": "gradle",
@@ -101,7 +105,7 @@ class SnykDependencyMonitoringGradlePluginFuncTest extends AbstractGradleInterna
{
"nodeId": "org.apache.lucene:lucene-core@9.2.0",
"deps": [
],
"pkgId": "org.apache.lucene:lucene-core@9.2.0"
},
@@ -155,7 +159,7 @@ class SnykDependencyMonitoringGradlePluginFuncTest extends AbstractGradleInterna
]
},
"target": {
"remoteUrl": "http://github.com/elastic/elasticsearch.git",
"remoteUrl": "http://acme.org",
"branch": "unknown"
},
"targetReference": "$version",
@@ -164,7 +168,7 @@ class SnykDependencyMonitoringGradlePluginFuncTest extends AbstractGradleInterna
"$expectedLifecycle"
]
}
}""", true)
}""", file("build/snyk/dependencies.json").text, true)

where:
version | expectedLifecycle
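A note on the assertion rewrite above: JSONAssert's three-argument `assertEquals(expected, actual, strict)` takes the expected document first, so swapping the arguments also corrects their order. A minimal Groovy sketch of the call shape, assuming the standard org.skyscreamer JSONAssert API:

import org.skyscreamer.jsonassert.JSONAssert

// Expected JSON literal first, then the actual output, with strict matching enabled.
def expectedJson = '{"id": 1}'  // hypothetical expected document
def actualJson = '{"id": 1}'    // e.g. file("build/snyk/dependencies.json").text
JSONAssert.assertEquals(expectedJson, actualJson, true)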
@@ -116,8 +116,9 @@ private void checkModuleVersion(ModuleReference mref) {

private void checkModuleNamePrefix(ModuleReference mref) {
getLogger().info("{} checking module name prefix for {}", this, mref.descriptor().name());
- if (mref.descriptor().name().startsWith("org.elasticsearch.") == false) {
-     throw new GradleException("Expected name starting with \"org.elasticsearch.\", in " + mref.descriptor());
+ if (mref.descriptor().name().startsWith("org.elasticsearch.") == false
+     && mref.descriptor().name().startsWith("co.elastic.") == false) {
+     throw new GradleException("Expected name starting with \"org.elasticsearch.\" or \"co.elastic.\", in " + mref.descriptor());
}
}

@@ -57,6 +57,7 @@ public class GenerateSnykDependencyGraph extends DefaultTask {
private final Property<String> projectPath;
private final Property<String> targetReference;
private final Property<String> version;
+ private final Property<String> remoteUrl;

@Inject
public GenerateSnykDependencyGraph(ObjectFactory objectFactory) {
@@ -66,6 +67,7 @@ public GenerateSnykDependencyGraph(ObjectFactory objectFactory) {
projectName = objectFactory.property(String.class);
projectPath = objectFactory.property(String.class);
version = objectFactory.property(String.class);
+ remoteUrl = objectFactory.property(String.class);
targetReference = objectFactory.property(String.class);
}

@@ -115,7 +117,7 @@ private Map<String, List<String>> projectAttributesData() {
}

private Object buildTargetData() {
return Map.of("remoteUrl", "http://github.com/elastic/elasticsearch.git", "branch", BuildParams.getGitRevision());
return Map.of("remoteUrl", remoteUrl.get(), "branch", BuildParams.getGitRevision());
}

@InputFiles
@@ -148,6 +150,11 @@ public Property<String> getGradleVersion() {
return gradleVersion;
}

+ @Input
+ public Property<String> getRemoteUrl() {
+     return remoteUrl;
+ }

@Input
public Property<String> getTargetReference() {
return targetReference;
@@ -8,6 +8,7 @@

package org.elasticsearch.gradle.internal.snyk;

+ import org.elasticsearch.gradle.internal.conventions.info.GitInfo;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
@@ -42,14 +43,16 @@ public void apply(Project project) {
generateSnykDependencyGraph.getGradleVersion().set(project.getGradle().getGradleVersion());
generateSnykDependencyGraph.getTargetReference()
.set(providerFactory.gradleProperty("snykTargetReference").orElse(projectVersion));
+ generateSnykDependencyGraph.getRemoteUrl()
+     .convention(providerFactory.provider(() -> GitInfo.gitInfo(project.getRootDir()).urlFromOrigin()));
generateSnykDependencyGraph.getOutputFile().set(projectLayout.getBuildDirectory().file("snyk/dependencies.json"));
});

project.getTasks().register(UPLOAD_TASK_NAME, UploadSnykDependenciesGraph.class, t -> {
t.getInputFile().set(generateTaskProvider.get().getOutputFile());
t.getToken().set(providerFactory.environmentVariable("SNYK_TOKEN"));
- // the elasticsearch snyk project id
- t.getProjectId().set(providerFactory.gradleProperty("snykProjectId"));
+ // the snyk org to target
+ t.getSnykOrganisation().set(providerFactory.gradleProperty("snykOrganisation"));
});

project.getPlugins().withType(JavaPlugin.class, javaPlugin -> generateTaskProvider.configure(generateSnykDependencyGraph -> {
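Taken together, a consuming build now gets a Git-derived `remoteUrl` by default and can override it per task, as the functional test at the top of this diff does. A minimal Groovy sketch — the plugin id here is hypothetical; only the task name appears in this diff:

plugins {
    id 'elasticsearch.snyk-dependency-monitoring' // hypothetical plugin id
}

tasks.named('generateSnykDependencyGraph').configure {
    // Overrides the convention derived from GitInfo.gitInfo(rootDir).urlFromOrigin().
    remoteUrl = 'http://acme.org'
}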
@@ -42,12 +42,12 @@ public class UploadSnykDependenciesGraph extends DefaultTask {
private final RegularFileProperty inputFile;
private final Property<String> token;
private final Property<String> url;
- private final Property<String> projectId;
+ private final Property<String> snykOrganisation;

@Inject
public UploadSnykDependenciesGraph(ObjectFactory objectFactory) {
url = objectFactory.property(String.class).convention(DEFAULT_SERVER + GRADLE_GRAPH_ENDPOINT);
- projectId = objectFactory.property(String.class);
+ snykOrganisation = objectFactory.property(String.class);
token = objectFactory.property(String.class);
inputFile = objectFactory.fileProperty();
}
@@ -76,7 +76,7 @@ void upload() {

private String calculateEffectiveEndpoint() {
String url = this.url.get();
- return url.endsWith(GRADLE_GRAPH_ENDPOINT) ? url : projectId.map(id -> url + "?org=" + id).getOrElse(url);
+ return snykOrganisation.map(id -> url + "?org=" + id).getOrElse(url);
}

@Input
@@ -91,8 +91,8 @@ public Property<String> getUrl() {

@Input
@Optional
- public Property<String> getProjectId() {
-     return projectId;
+ public Property<String> getSnykOrganisation() {
+     return snykOrganisation;
}

@InputFile
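With `projectId` renamed to `snykOrganisation`, the organisation is appended as an `?org=` query parameter whenever it is set, even when the URL ends with the default graph endpoint. A hedged configuration sketch — the property names come from the diff, while the task name and server URL are placeholders:

tasks.named('uploadSnykDependenciesGraph').configure { // task name assumed
    url = 'https://snyk.example.com/api/v1/monitor/gradle/graph' // placeholder server
    snykOrganisation = 'my-org' // effective endpoint becomes <url>?org=my-org
    // The token itself is read from the SNYK_TOKEN environment variable by the plugin.
}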
5 changes: 5 additions & 0 deletions docs/changelog/96515.yaml
@@ -0,0 +1,5 @@
pr: 96515
summary: Support boxplot aggregation in transform
area: Transform
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97587.yaml
@@ -0,0 +1,5 @@
pr: 97587
summary: Fix `sub_searches` serialization bug
area: Ranking
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97683.yaml
@@ -0,0 +1,5 @@
pr: 97683
summary: Refactor nested field handling in `FieldFetcher`
area: Search
type: enhancement
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/97718.yaml
@@ -0,0 +1,6 @@
pr: 97718
summary: Fix async missing events
area: EQL
type: bug
issues:
- 97644
5 changes: 5 additions & 0 deletions docs/changelog/97732.yaml
@@ -0,0 +1,5 @@
pr: 97732
summary: "[Fleet] Allow `kibana_system` to put datastream lifecycle"
area: Authorization
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97741.yaml
@@ -0,0 +1,5 @@
pr: 97741
summary: Upgrade xmlsec to 2.1.8
area: Authentication
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97773.yaml
@@ -0,0 +1,5 @@
pr: 97773
summary: "[Profiling] Support index migrations"
area: Application
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97775.yaml
@@ -0,0 +1,5 @@
pr: 97775
summary: Fix NPE in Desired Balance API
area: Allocation
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/97840.yaml
@@ -0,0 +1,6 @@
pr: 97840
summary: Improve exception handling in Coordinator#publish
area: Cluster Coordination
type: bug
issues:
- 97798
5 changes: 5 additions & 0 deletions docs/changelog/97869.yaml
@@ -0,0 +1,5 @@
pr: 97869
summary: Add missing sync on `indicesThatCannotBeCreated`
area: CRUD
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/97899.yaml
@@ -0,0 +1,5 @@
pr: 97899
summary: Fork response reading in `TransportNodesAction`
area: Distributed
type: bug
issues: []
32 changes: 17 additions & 15 deletions docs/reference/aggregations/metrics/geoline-aggregation.asciidoc
@@ -1,8 +1,8 @@
[role="xpack"]
[[search-aggregations-metrics-geo-line]]
- === Geo-Line Aggregation
+ === Geo-line aggregation
++++
- <titleabbrev>Geo-Line</titleabbrev>
+ <titleabbrev>Geo-line</titleabbrev>
++++

The `geo_line` aggregation aggregates all `geo_point` values within a bucket into a `LineString` ordered
@@ -77,13 +77,12 @@ Which returns:
The resulting https://tools.ietf.org/html/rfc7946#section-3.2[GeoJSON Feature] contains both a `LineString` geometry
for the path generated by the aggregation and a map of `properties`.
The property `complete` informs of whether all documents matched were used to generate the geometry.
- The `size` option described below can be used to limit the number of documents included in the aggregation,
+ The <<search-aggregations-metrics-geo-line-size,`size` option>> can be used to limit the number of documents included in the aggregation,
leading to results with `complete: false`.
- Exactly which documents are dropped from results depends on whether the aggregation is based
- on `time_series` or not, and this is discussed in
- <<search-aggregations-metrics-geo-line-grouping-time-series-advantages,more detail below>>.
+ Exactly which documents are dropped from results <<search-aggregations-metrics-geo-line-grouping-time-series-advantages,depends on whether the aggregation is based
+ on `time_series` or not>>.

- The above result could be displayed in a map user interface:
+ This result could be displayed in a map user interface:

image:images/spatial/geo_line.png[Kibana map with museum tour of Amsterdam]

@@ -132,18 +131,19 @@ feature properties.
The line is sorted in ascending order by the sort key when set to "ASC", and in descending
order with "DESC".

+ [[search-aggregations-metrics-geo-line-size]]
`size`::
(Optional, integer, default: `10000`) The maximum length of the line represented in the aggregation.
Valid sizes are between one and 10000.
Within <<search-aggregations-metrics-geo-line-grouping-time-series,`time_series`>>
the aggregation uses line simplification to constrain the size, otherwise it uses truncation.
- See <<search-aggregations-metrics-geo-line-grouping-time-series-advantages,below>>
+ Refer to <<search-aggregations-metrics-geo-line-grouping-time-series-advantages>>
for a discussion on the subtleties involved.
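As an illustration of the `size` option, a request along these lines would cap each line at 100 points — a sketch assuming the `location` and `@timestamp` field names from the `tour` example later on this page:

[source,console]
----
POST /tour/_search?filter_path=aggregations
{
  "aggs": {
    "museum_tour": {
      "geo_line": {
        "point": {"field": "location"},
        "sort": {"field": "@timestamp"},
        "size": 100
      }
    }
  }
}
----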

[[search-aggregations-metrics-geo-line-grouping]]
==== Grouping

- The simple example above will produce a single track for all the data selected by the query. However, it is far more
+ This simple example produces a single track for all the data selected by the query. However, it is far more
common to need to group the data into multiple tracks. For example, grouping flight transponder measurements by
flight call-sign before sorting each flight by timestamp and producing a separate track for each.

@@ -210,7 +210,7 @@ POST /tour/_bulk?refresh
[[search-aggregations-metrics-geo-line-grouping-terms]]
==== Grouping with terms

- Using the above data, for a non-time-series use case, the grouping can be done using a
+ Using this data, for a non-time-series use case, the grouping can be done using a
<<search-aggregations-bucket-terms-aggregation,terms aggregation>> based on city name.
This would work whether or not we had defined the `tour` index as a time series index.

@@ -294,17 +294,19 @@ Which returns:
----
// TESTRESPONSE

- The above results contain an array of buckets, where each bucket is a JSON object with the `key` showing the name
+ These results contain an array of buckets, where each bucket is a JSON object with the `key` showing the name
of the `city` field, and an inner aggregation result called `museum_tour` containing a
https://tools.ietf.org/html/rfc7946#section-3.2[GeoJSON Feature] describing the
actual route between the various attractions in that city.
Each result also includes a `properties` object with a `complete` value which will be `false` if the geometry
was truncated to the limits specified in the `size` parameter.
- Note that when we use `time_series` in the example below, we will get the same results structured a little differently.
+ Note that when we use `time_series` in the next example, we will get the same results structured a little differently.

[[search-aggregations-metrics-geo-line-grouping-time-series]]
==== Grouping with time-series

+ preview::[]

Using the same data as before, we can also perform the grouping with a
<<search-aggregations-bucket-time-series-aggregation,`time_series` aggregation>>.
This will group by TSID, which is defined as the combinations of all fields with `time_series_dimension: true`,
@@ -337,7 +339,7 @@ NOTE: The `geo_line` aggregation no longer requires the `sort` field when nested
This is because the sort field is set to `@timestamp`, which all time-series indexes are pre-sorted by.
If you do set this parameter, and set it to something other than `@timestamp` you will get an error.

- The above query will result in:
+ This query will result in:

[source,js]
----
@@ -400,7 +402,7 @@ The above query will result in:
----
// TESTRESPONSE

- The above results are essentially the same as with the previous `terms` aggregation example, but structured differently.
+ These results are essentially the same as with the previous `terms` aggregation example, but structured differently.
Here we see the buckets returned as a map, where the key is an internal description of the TSID.
This TSID is unique for each unique combination of fields with `time_series_dimension: true`.
Each bucket contains a `key` field which is also a map of all dimension values for the TSID, in this case only the city
@@ -414,7 +416,7 @@ was simplified to the limits specified in the `size` parameter.
[[search-aggregations-metrics-geo-line-grouping-time-series-advantages]]
==== Why group with time-series?

- When reviewing the above examples, you might think that there is little difference between using
+ When reviewing these examples, you might think that there is little difference between using
<<search-aggregations-bucket-terms-aggregation,`terms`>> or
<<search-aggregations-bucket-time-series-aggregation,`time_series`>>
to group the geo-lines. However, there are some important differences in behaviour between the two cases.
29 changes: 15 additions & 14 deletions docs/reference/how-to/size-your-shards.asciidoc
@@ -140,20 +140,21 @@ Every new backing index is an opportunity to further tune your strategy.

[discrete]
[[shard-size-recommendation]]
- ==== Aim for shard sizes between 10GB and 50GB
-
- Larger shards take longer to recover after a failure. When a node fails, {es}
- rebalances the node's shards across the data tier's remaining nodes. This
- recovery process typically involves copying the shard contents across the
- network, so a 100GB shard will take twice as long to recover than a 50GB shard.
- In contrast, small shards carry proportionally more overhead and are less
- efficient to search. Searching fifty 1GB shards will take substantially more
- resources than searching a single 50GB shard containing the same data.
-
- There are no hard limits on shard size, but experience shows that shards
- between 10GB and 50GB typically work well for logs and time series data. You
- may be able to use larger shards depending on your network and use case.
- Smaller shards may be appropriate for
+ ==== Aim for shards of up to 200M documents, or with sizes between 10GB and 50GB
+
+ There is some overhead associated with each shard, both in terms of cluster
+ management and search performance. Searching a thousand 50MB shards will be
+ substantially more expensive than searching a single 50GB shard containing the
+ same data. However, very large shards can also cause slower searches and will
+ take longer to recover after a failure.
+
+ There is no hard limit on the physical size of a shard, and each shard can in
+ theory contain up to just over two billion documents. However, experience shows
+ that shards between 10GB and 50GB typically work well for many use cases, as
+ long as the per-shard document count is kept below 200 million.
+
+ You may be able to use larger shards depending on your network and use case,
+ and smaller shards may be appropriate for
{enterprise-search-ref}/index.html[Enterprise Search] and similar use cases.
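To see how existing shards measure up against these guidelines, one quick check is the cat shards API, sorted by store size; the `docs` column can be compared against the 200 million document guideline and `store` against the 10GB-50GB range:

[source,console]
----
GET _cat/shards?v=true&h=index,prirep,docs,store&s=store:desc
----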

If you use {ilm-init}, set the <<ilm-rollover,rollover action>>'s
Binary file modified docs/reference/images/index-mgmt/management_index_details.png
8 changes: 6 additions & 2 deletions docs/reference/indices/index-mgmt.asciidoc
@@ -51,7 +51,9 @@ You can also filter indices using the search bar.

You can drill down into each index to investigate the index
<<index-modules-settings,settings>>, <<mapping,mapping>>, and statistics.
From this view, you can also edit the index settings.

+ To view and explore the documents within an index, click the compass symbol next to the index name to open {kibana-ref}/discover.html[Discover].

[role="screenshot"]
image::images/index-mgmt/management_index_details.png[Index Management UI]
@@ -80,8 +82,10 @@ them.
To view more information about a data stream, such as its generation or its
current index lifecycle policy, click the stream's name.

+ To view and explore the data within a data stream, click the compass symbol next to the data stream name to open {kibana-ref}/discover.html[Discover].

[role="screenshot"]
- image::images/index-mgmt/management_index_data_stream_stats.png[Data stream details]
+ image::images/index-mgmt/management_index_data_stream_stats2.png[Data stream details]

To view information about the stream's backing indices, click the number in the
*Indices* column.