diff --git a/.backportrc.json b/.backportrc.json index 77b06cd419275..d2e92817c026b 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,9 +1,10 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.x", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { - "^v8.16.0$" : "main", + "^v9.0.0$" : "main", + "^v8.16.0$" : "8.x", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } -} \ No newline at end of file +} diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index f698f722d977e..e7ba4ba7610cd 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.25", "8.15.2", "8.16.0"] + BWC_VERSION: ["8.15.2", "8.16.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 3c98dd4b30e74..8ef8f5954887e 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -33,312 +33,6 @@ steps: env: {} - group: packaging-tests-upgrade steps: - - label: "{{matrix.image}} / 7.0.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.0.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.0.1 - - - label: "{{matrix.image}} / 7.1.1 / 
packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.1.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.1.1 - - - label: "{{matrix.image}} / 7.2.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.2.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.2.1 - - - label: "{{matrix.image}} / 7.3.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.3.2 - - - label: "{{matrix.image}} / 7.4.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.4.2 - - - label: "{{matrix.image}} / 7.5.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: 
family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.5.2 - - - label: "{{matrix.image}} / 7.6.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.6.2 - - - label: "{{matrix.image}} / 7.7.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.7.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.7.1 - - - label: "{{matrix.image}} / 7.8.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.8.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.8.1 - - - label: "{{matrix.image}} / 7.9.3 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.3 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.9.3 - - - label: "{{matrix.image}} / 7.10.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh 
-Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.10.2 - - - label: "{{matrix.image}} / 7.11.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.11.2 - - - label: "{{matrix.image}} / 7.12.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.12.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.12.1 - - - label: "{{matrix.image}} / 7.13.4 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.4 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.13.4 - - - label: "{{matrix.image}} / 7.14.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - 
buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.14.2 - - - label: "{{matrix.image}} / 7.15.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.15.2 - - - label: "{{matrix.image}} / 7.16.3 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.3 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.16.3 - - - label: "{{matrix.image}} / 7.17.25 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.25 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.17.25 - - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1 timeout_in_minutes: 300 @@ -628,6 +322,23 @@ steps: env: BWC_VERSION: 8.16.0 + - label: "{{matrix.image}} / 9.0.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v9.0.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + 
buildDirectory: /dev/shm/bk + diskSizeGb: 250 + env: + BWC_VERSION: 9.0.0 + - group: packaging-tests-windows steps: - label: "{{matrix.image}} / packaging-tests-windows" diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 4f862911a2d8c..5f75b7f1a2ef4 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -2,366 +2,6 @@ steps: - group: bwc steps: - - label: 7.0.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.0.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.1.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.1.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.2.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.2.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.2.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.3.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - 
preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.3.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.4.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.4.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.5.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.5.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.6.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.6.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.7.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.7.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.7.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.8.1 / bwc - command: 
.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.8.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.8.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.9.3 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.3#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.9.3 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.10.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.10.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.11.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.11.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.12.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.12.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - 
preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.12.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.13.4 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.4#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.13.4 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.14.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.14.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.15.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.15.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.16.3 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.3#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.16.3 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.17.25 / bwc - command: 
.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.25#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.17.25 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - label: 8.0.1 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.1#bwcTest timeout_in_minutes: 300 @@ -702,6 +342,26 @@ steps: - signal_reason: agent_stop limit: 3 + - label: 9.0.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v9.0.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + preemptible: true + diskSizeGb: 250 + env: + BWC_VERSION: 9.0.0 + retry: + automatic: + - exit_status: "-1" + limit: 3 + signal_reason: none + - signal_reason: agent_stop + limit: 3 + - label: concurrent-search-tests command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true functionalTests timeout_in_minutes: 420 @@ -771,7 +431,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.25", "8.15.2", "8.16.0"] + BWC_VERSION: ["8.15.2", "8.16.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -821,7 +481,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.25", "8.15.2", "8.16.0"] + BWC_VERSION: ["8.15.2", "8.16.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 6c5aaa38717ef..498727b3ecd39 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -1,22 +1,4 @@ BWC_VERSION: - - "7.0.1" - - "7.1.1" - - "7.2.1" - - "7.3.2" - - "7.4.2" - - "7.5.2" - - "7.6.2" - - "7.7.1" - - "7.8.1" - - "7.9.3" - - "7.10.2" - - "7.11.2" - - 
"7.12.1" - - "7.13.4" - - "7.14.2" - - "7.15.2" - - "7.16.3" - - "7.17.25" - "8.0.1" - "8.1.3" - "8.2.3" @@ -34,3 +16,4 @@ BWC_VERSION: - "8.14.3" - "8.15.2" - "8.16.0" + - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index f00be923db67c..a2f1e0c675ea5 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - - "7.17.25" - "8.15.2" - "8.16.0" + - "9.0.0" diff --git a/REST_API_COMPATIBILITY.md b/REST_API_COMPATIBILITY.md index c36b4ea9dbfb0..4a6ad4e7e17f5 100644 --- a/REST_API_COMPATIBILITY.md +++ b/REST_API_COMPATIBILITY.md @@ -158,12 +158,12 @@ The above code checks the request's compatible version and if the request has th The primary means of testing compatibility is via the prior major version's YAML REST tests. The build system will download the latest prior version of the YAML rest tests and execute them against the current cluster version. Prior to execution the tests will be transformed by injecting the correct headers to enable compatibility as well as other custom changes to the tests to allow the tests to pass. These customizations are configured via the build.gradle and happen just prior to test execution. Since the compatibility tests are manipulated version of the tests stored in Github (via the past major version), it is important to find the local (on disk) version for troubleshooting compatibility tests. -The tests are wired into the `check` task, so that is the easiest way to test locally prior to committing. More specifically the task is called `yamlRestTestV7CompatTest`, where 7 is the version of tests that are executing. For example, version 8 of the server will have a task named `yamlRestTestV7CompatTest` and version 9 of the server will have a task named `yamlRestTestV8CompatTest`. These behaves nearly identical to it's non-compat `yamlRestTest` task. 
The only variance is that the tests are sourced from the prior version branch and the tests go through a transformation phase before execution. The transformation task is `yamlRestTestV7CompatTransform` where the Vnumber follows the same convention as the test. +The tests are wired into the `check` task, so that is the easiest way to test locally prior to committing. More specifically the task is called `yamlRestCompatTest`. These behave nearly identically to its non-compat `yamlRestTest` task. The only variance is that the tests are sourced from the prior version branch and the tests go through a transformation phase before execution. The transformation task is `yamlRestCompatTestTransform`. For example: ```bash -./gradlew :rest-api-spec:yamlRestTestV7CompatTest +./gradlew :rest-api-spec:yamlRestCompatTest ``` Since these are a variation of backward compatibility testing, the entire suite of compatibility tests will be skipped anytime the backward compatibility testing is disabled. Since the source code for these tests live in a branch of code, disabling a specific test should be done via the transformation task configuration in build.gradle (i.e. `yamlRestTestV7CompatTransform`). @@ -188,7 +188,7 @@ Muting compatibility tests should be done via a test transform. A per test skip ```groovy -tasks.named("yamlRestTestV7CompatTransform").configure({ task -> +tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTestsByFilePattern("**/cat*/*.yml", "Cat API are not supported") task.skipTest("bulk/10_basic/Array of objects", "Muted due failures. 
See #12345") }) diff --git a/branches.json b/branches.json index 1d860501cbc87..e464d6179f2ba 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,9 @@ { "branch": "main" }, + { + "branch": "8.x" + }, { "branch": "8.15" }, diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index fc7ccd651d73b..8c5c84a276719 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -27,7 +27,7 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF buildFile << """ apply plugin: 'elasticsearch.internal-distribution-bwc-setup' """ - execute("git branch origin/8.0", file("cloned")) + execute("git branch origin/8.x", file("cloned")) execute("git branch origin/7.16", file("cloned")) execute("git branch origin/7.15", file("cloned")) } @@ -113,9 +113,9 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS and: "assemble task triggered" result.output.contains("[8.0.0] > Task :distribution:archives:darwin-tar:extractedAssemble") - result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.0/" + + result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.x/" + "distribution/archives/darwin-tar/build/install") - result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.0/" + + result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.x/" + "distribution/archives/darwin-tar/build/install/elasticsearch-8.0.0-SNAPSHOT") 
} diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy index 737c448f23be6..3ffbd926ec847 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy @@ -19,10 +19,9 @@ import org.gradle.testkit.runner.TaskOutcome class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { - def compatibleVersion = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1 - def specIntermediateDir = "restResources/v${compatibleVersion}/yamlSpecs" - def testIntermediateDir = "restResources/v${compatibleVersion}/yamlTests" - def transformTask = ":yamlRestTestV${compatibleVersion}CompatTransform" + def specIntermediateDir = "restResources/compat/yamlSpecs" + def testIntermediateDir = "restResources/compat/yamlTests" + def transformTask = ":yamlRestCompatTestTransform" def YAML_FACTORY = new YAMLFactory() def MAPPER = new ObjectMapper(YAML_FACTORY) def READER = MAPPER.readerFor(ObjectNode.class) @@ -36,9 +35,11 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe buildApiRestrictionsDisabled = true } - def "yamlRestTestVxCompatTest does nothing when there are no tests"() { + def "yamlRestCompatTest does nothing when there are no tests"() { given: - subProject(":distribution:bwc:maintenance") << """ + internalBuild() + + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -46,26 +47,24 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe """ 
buildFile << """ - plugins { - id 'elasticsearch.legacy-yaml-rest-compat-test' - } + apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' """ when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest", '--stacktrace').build() + def result = gradleRunner("yamlRestCompatTest", '--stacktrace').build() then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE + result.task(":yamlRestCompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE } - def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:maintenance"() { + def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:staged"() { given: internalBuild() - subProject(":distribution:bwc:maintenance") << """ + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -90,7 +89,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe String wrongTest = "wrong_version.yml" String additionalTest = "additional_test.yml" setupRestResources([wrongApi], [wrongTest]) //setups up resources for current version, which should not be used for this test - String sourceSetName = "yamlRestTestV" + compatibleVersion + "Compat" + String sourceSetName = "yamlRestCompatTest" addRestTestsToProject([additionalTest], sourceSetName) //intentionally adding to yamlRestTest source set since the .classes are copied from there file("src/yamlRestTest/java/MockIT.java") << "import org.junit.Test;class MockIT { @Test public void doNothing() { }}" @@ -98,14 +97,14 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe String api = "foo.json" String test = "10_basic.yml" //add the compatible test and api files, 
these are the prior version's normal yaml rest tests - file("distribution/bwc/maintenance/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" - file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" + file("distribution/bwc/staged/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" + file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + def result = gradleRunner("yamlRestCompatTest").build() then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestCompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SUCCESS result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SUCCESS result.task(transformTask).outcome == TaskOutcome.SUCCESS @@ -132,19 +131,20 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE when: - result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + result = gradleRunner("yamlRestCompatTest").build() then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestCompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.UP_TO_DATE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.UP_TO_DATE result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE } - def "yamlRestTestVxCompatTest is wired into check and checkRestCompat"() { + def "yamlRestCompatTest is wired into check and checkRestCompat"() { given: + internalBuild() withVersionCatalogue() - subProject(":distribution:bwc:maintenance") << """ + subProject(":distribution:bwc:staged") << """ 
configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -152,10 +152,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe """ buildFile << """ - plugins { - id 'elasticsearch.legacy-yaml-rest-compat-test' - } - + apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' """ when: @@ -164,7 +161,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE + result.task(":yamlRestCompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE @@ -178,7 +175,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestCompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SKIPPED result.task(transformTask).outcome == TaskOutcome.SKIPPED @@ -188,7 +185,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe given: internalBuild() - subProject(":distribution:bwc:maintenance") << """ + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -204,7 +201,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe dependencies { yamlRestTestImplementation 
"junit:junit:4.12" } - tasks.named("yamlRestTestV${compatibleVersion}CompatTransform").configure({ task -> + tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("test/test/two", "This is a test to skip test two") task.replaceValueInMatch("_type", "_doc") task.replaceValueInMatch("_source.values", ["z", "x", "y"], "one") @@ -232,7 +229,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe setupRestResources([], []) - file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ + file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ "one": - do: do_.some.key_to_replace: @@ -279,7 +276,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - match: {} """.stripIndent() when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + def result = gradleRunner("yamlRestCompatTest").build() then: @@ -302,22 +299,22 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe --- one: - do: - do_.some.key_that_was_replaced: - index: "test" - id: 1 - keyvalue : replacedkeyvalue do_.some.key_to_replace_in_two: no_change_here: "because it's not in test 'two'" warnings: - "warning1" - "warning2" headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "added allowed warning" allowed_warnings_regex: - "added allowed warning regex .* [0-9]" + do_.some.key_that_was_replaced: + index: "test" + id: 1 + keyvalue : "replacedkeyvalue" - match: _source.values: - "z" @@ -334,13 +331,14 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - is_false: "replaced_value" - is_true: 
"value_not_to_replace" - is_false: "value_not_to_replace" - - length: { key.in_length_that_was_replaced: 1 } - - length: { value_to_replace: 99 } + - length: + key.in_length_that_was_replaced: 1 + - length: + value_to_replace: 99 - match: _source.added: name: "jake" likes: "cheese" - --- two: - skip: @@ -349,17 +347,17 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe get: index: "test2" id: 1 - do_.some.key_that_was_replaced_in_two: - changed_here: "because it is in test 'two'" headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" warnings_regex: - "regex warning here .* [a-z]" allowed_warnings: - "added allowed warning" allowed_warnings_regex: - "added allowed warning regex .* [0-9]" + do_.some.key_that_was_replaced_in_two: + changed_here: "because it is in test 'two'" - match: _source.values: - "foo" @@ -371,12 +369,12 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - is_false: "replaced_value" - is_true: "value_not_to_replace" - is_false: "value_not_to_replace" - - length: { value_not_to_replace: 1 } + - length: + value_not_to_replace: 1 --- "use cat with no header": - do: - cat.indices: - {} + cat.indices: {} allowed_warnings: - "added allowed warning" allowed_warnings_regex: @@ -384,7 +382,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - match: {} """.stripIndent()).readAll() - expectedAll.eachWithIndex{ ObjectNode expected, int i -> + expectedAll.eachWithIndex { ObjectNode expected, int i -> if(expected != actual.get(i)) { println("\nTransformed Test:") SequenceWriter sequenceWriter = WRITER.writeValues(System.out) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index 41bfddb01e665..720c159f75552 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.gradle.internal; -import org.elasticsearch.gradle.Architecture; -import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; @@ -27,7 +25,6 @@ import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import static java.util.Collections.unmodifiableList; @@ -67,7 +64,6 @@ public class BwcVersions { private static final Pattern LINE_PATTERN = Pattern.compile( "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)?.*\\);" ); - private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); private static final String GLIBC_VERSION_ENV_VAR = "GLIBC_VERSION"; private final Version currentVersion; @@ -124,9 +120,7 @@ public UnreleasedVersionInfo unreleasedInfo(Version version) { } public void forPreviousUnreleased(Consumer consumer) { - filterSupportedVersions( - getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).collect(Collectors.toList()) - ).stream().map(unreleased::get).forEach(consumer); + getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).map(unreleased::get).forEach(consumer); } private String getBranchFor(Version version) { @@ -155,6 +149,7 @@ private Map computeUnreleased() { List unreleasedList = unreleased.stream().sorted(Comparator.reverseOrder()).toList(); Map result = new TreeMap<>(); + boolean newMinor = false; for (int i = 0; i < unreleasedList.size(); i++) { Version esVersion = unreleasedList.get(i); // This 
is either a new minor or staged release @@ -162,11 +157,17 @@ private Map computeUnreleased() { result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution")); } else if (esVersion.getRevision() == 0) { // If there are two upcoming unreleased minors then this one is the new minor - if (unreleasedList.get(i + 1).getRevision() == 0) { - result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:minor")); - } else { - result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged")); - } + if (newMinor == false && unreleasedList.get(i + 1).getRevision() == 0) { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor")); + newMinor = true; + } else if (newMinor == false + && unreleasedList.stream().filter(v -> v.getMajor() == esVersion.getMajor() && v.getRevision() == 0).count() == 1) { + // This is the only unreleased new minor which means we've not yet staged it for release + result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor")); + newMinor = true; + } else { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged")); + } } else { // If this is the oldest unreleased version and we have a maintenance release if (i == unreleasedList.size() - 1 && hasMaintenanceRelease) { @@ -226,16 +227,9 @@ private List getReleased() { } /** - * Return versions of Elasticsearch which are index compatible with the current version, and also work on the local machine. + * Return versions of Elasticsearch which are index compatible with the current version. */ public List getIndexCompatible() { - return filterSupportedVersions(getAllIndexCompatible()); - } - - /** - * Return all versions of Elasticsearch which are index compatible with the current version. 
- */ - public List getAllIndexCompatible() { return versions.stream().filter(v -> v.getMajor() >= (currentVersion.getMajor() - 1)).toList(); } @@ -248,7 +242,7 @@ public void withIndexCompatible(Predicate filter, BiConsumer getWireCompatible() { - return filterSupportedVersions(versions.stream().filter(v -> v.compareTo(MINIMUM_WIRE_COMPATIBLE_VERSION) >= 0).toList()); + return versions.stream().filter(v -> v.compareTo(getMinimumWireCompatibleVersion()) >= 0).toList(); } public void withWireCompatible(BiConsumer versionAction) { @@ -259,20 +253,6 @@ public void withWireCompatible(Predicate filter, BiConsumer versionAction.accept(v, "v" + v.toString())); } - private List filterSupportedVersions(List wireCompat) { - Predicate supported = v -> true; - if (Architecture.current() == Architecture.AARCH64) { - final String version; - if (ElasticsearchDistribution.CURRENT_PLATFORM.equals(ElasticsearchDistribution.Platform.DARWIN)) { - version = "7.16.0"; - } else { - version = "7.12.0"; // linux shipped earlier for aarch64 - } - supported = v -> v.onOrAfter(version); - } - return wireCompat.stream().filter(supported).collect(Collectors.toList()); - } - public List getUnreleasedIndexCompatible() { List unreleasedIndexCompatible = new ArrayList<>(getIndexCompatible()); unreleasedIndexCompatible.retainAll(getUnreleased()); @@ -286,7 +266,17 @@ public List getUnreleasedWireCompatible() { } public Version getMinimumWireCompatibleVersion() { - return MINIMUM_WIRE_COMPATIBLE_VERSION; + // Determine minimum wire compatible version from list of known versions. + // Current BWC policy states the minimum wire compatible version is the last minor release or the previous major version. 
+ return versions.stream() + .filter(v -> v.getRevision() == 0) + .filter(v -> v.getMajor() == currentVersion.getMajor() - 1) + .max(Comparator.naturalOrder()) + .orElseThrow(() -> new IllegalStateException("Unable to determine minimum wire compatible version.")); + } + + public Version getCurrentVersion() { + return currentVersion; } public record UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) {} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index dd1d9d48252e1..3af59f00299b8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -30,7 +30,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk WOLFI( "docker.elastic.co/wolfi/chainguard-base:latest@sha256:c16d3ad6cebf387e8dd2ad769f54320c4819fbbaa21e729fad087c7ae223b4d0", - "wolfi", + "-wolfi", "apk" ); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 77af3445f530c..a170606800f39 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -78,6 +78,7 @@ public class RestTestBasePlugin implements Plugin { private static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadataDeps"; private static final String DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION = "defaultDistrofeaturesMetadataDeps"; private static final String TESTS_FEATURES_METADATA_PATH = "tests.features.metadata.path"; + private static final String 
MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP = "tests.minimum.wire.compatible"; private final ProviderFactory providerFactory; @@ -173,6 +174,9 @@ public void apply(Project project) { task.systemProperty("tests.security.manager", "false"); task.systemProperty("tests.system_call_filter", "false"); + // Pass minimum wire compatible version which is used by upgrade tests + task.systemProperty(MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP, BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + // Register plugins and modules as task inputs and pass paths as system properties to tests var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulePath::getAsPath); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java index e0581ebf67081..fd1446b5ff211 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java @@ -9,8 +9,8 @@ package org.elasticsearch.gradle.internal.test.rest.compat.compat; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; +import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask; import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask; import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; @@ -40,6 +40,7 @@ import java.io.File; import java.nio.file.Path; import java.util.Arrays; +import 
java.util.Comparator; import java.util.Map; import javax.inject.Inject; @@ -60,8 +61,7 @@ public abstract class AbstractYamlRestCompatTestPlugin implements Plugin v.getMajor() == currentMajor - 1) + .min(Comparator.reverseOrder()) + .get(); + String lastMinorProjectPath = BuildParams.getBwcVersions().unreleasedInfo(lastMinor).gradleProjectPath(); + // copy compatible rest specs Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); - Dependency bwcMinor = project.getDependencies() - .project(Map.of("path", ":distribution:bwc:maintenance", "configuration", "checkout")); + Dependency bwcMinor = project.getDependencies().project(Map.of("path", lastMinorProjectPath, "configuration", "checkout")); project.getDependencies().add(bwcMinorConfig.getName(), bwcMinor); String projectPath = project.getPath(); @@ -183,7 +192,7 @@ public void apply(Project project) { // transform the copied tests task TaskProvider transformCompatTestTask = project.getTasks() - .register("yamlRestTestV" + COMPATIBLE_VERSION + "CompatTransform", RestCompatTestTransformTask.class, task -> { + .register("yamlRestCompatTestTransform", RestCompatTestTransformTask.class, task -> { task.getSourceDirectory().set(copyCompatYamlTestTask.flatMap(CopyRestTestsTask::getOutputResourceDir)); task.getOutputDirectory() .set(project.getLayout().getBuildDirectory().dir(compatTestsDir.resolve("transformed").toString())); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java index e84c84cc426a3..0bff8d65586d3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java @@ -34,7 +34,7 @@ public LegacyYamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperation @Override public TaskProvider registerTestTask(Project project, SourceSet sourceSet) { - return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getTaskName(null, "test")); + return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getName()); } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java index 79588ca722ff1..b376284761ff0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java @@ -32,7 +32,7 @@ public YamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperations file @Override public TaskProvider registerTestTask(Project project, SourceSet sourceSet) { - return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getTaskName(null, "test"), StandaloneRestIntegTestTask.class); + return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getName(), StandaloneRestIntegTestTask.class); } @Override diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy index 39a9af38e6a9c..8fa1ac9ea2094 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy @@ -10,12 +10,9 @@ package 
org.elasticsearch.gradle.internal import spock.lang.Specification -import org.elasticsearch.gradle.Architecture -import org.elasticsearch.gradle.ElasticsearchDistribution import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo - class BwcVersionsSpec extends Specification { List versionLines = [] @@ -42,11 +39,12 @@ class BwcVersionsSpec extends Specification { unreleased == [ (v('7.16.2')): new UnreleasedVersionInfo(v('7.16.2'), '7.16', ':distribution:bwc:bugfix'), (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.17', ':distribution:bwc:staged'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.x', ':distribution:bwc:minor'), (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.16.2'), v('7.17.0'), v('8.0.0'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.16.2'), v('7.17.0'), v('8.0.0'), v('8.1.0')] + bwc.minimumWireCompatibleVersion == v('7.17.0') } def "current version is next minor with next major and last minor both staged"() { @@ -71,11 +69,11 @@ class BwcVersionsSpec extends Specification { unreleased == [ (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.17', ':distribution:bwc:staged'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.x', ':distribution:bwc:minor'), (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == 
[v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0'), v('8.1.0')] } def "current is next minor with upcoming minor staged"() { @@ -104,7 +102,7 @@ class BwcVersionsSpec extends Specification { (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')] } def "current version is staged major"() { @@ -131,7 +129,61 @@ class BwcVersionsSpec extends Specification { (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0')] + } + + def "current version is major with unreleased next minor"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.16.1', '8.10.0') + addVersion('7.17.0', 
'8.10.0') + addVersion('8.0.0', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), + (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.x', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + ] + bwc.wireCompatible == [v('7.17.0'), v('8.0.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0')] + } + + def "current version is major with staged next minor"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.17.0', '8.10.0') + addVersion('8.0.0', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.15.2')): new UnreleasedVersionInfo(v('7.15.2'), '7.15', ':distribution:bwc:bugfix'), + (v('7.16.0')): new UnreleasedVersionInfo(v('7.16.0'), '7.16', ':distribution:bwc:staged'), + (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.x', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + ] + bwc.wireCompatible == [v('7.17.0'), v('8.0.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.17.0'), v('8.0.0')] } def "current version is next bugfix"() { @@ -159,7 +211,7 @@ class BwcVersionsSpec extends Specification { (v('8.0.1')): new UnreleasedVersionInfo(v('8.0.1'), 'main', ':distribution'), ] bwc.wireCompatible == 
[v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')] } def "current version is next minor with no staged releases"() { @@ -189,7 +241,7 @@ class BwcVersionsSpec extends Specification { (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] } private void addVersion(String elasticsearch, String lucene) { @@ -202,12 +254,4 @@ class BwcVersionsSpec extends Specification { return Version.fromString(version) } - private boolean osxAarch64() { - Architecture.current() == Architecture.AARCH64 && - ElasticsearchDistribution.CURRENT_PLATFORM.equals(ElasticsearchDistribution.Platform.DARWIN) - } - - private List osFiltered(ArrayList versions) { - return osxAarch64() ? 
versions.findAll {it.onOrAfter("7.16.0")} : versions - } } diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 1dd9fb95bd17b..edb97a2968bc8 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 8.16.0 +elasticsearch = 9.0.0 lucene = 9.11.1 bundled_jdk_vendor = openjdk diff --git a/build.gradle b/build.gradle index 01fdace570ce0..8430ac335d447 100644 --- a/build.gradle +++ b/build.gradle @@ -135,7 +135,7 @@ tasks.register("updateCIBwcVersions") { } doLast { - writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible)) + writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)) writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible)) expandBwcList( ".buildkite/pipelines/intake.yml", @@ -149,7 +149,7 @@ tasks.register("updateCIBwcVersions") { new ListExpansion(versions: filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible), variable: "BWC_LIST"), ], [ - new StepExpansion(templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", versions: filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible), variable: "BWC_STEPS"), + new StepExpansion(templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", versions: filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible), variable: "BWC_STEPS"), ] ) @@ -157,7 +157,7 @@ tasks.register("updateCIBwcVersions") { ".buildkite/pipelines/periodic-packaging.yml", ".buildkite/pipelines/periodic-packaging.template.yml", ".buildkite/pipelines/periodic-packaging.bwc.template.yml", - filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible) + filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible) ) } } @@ -186,7 +186,7 @@ tasks.register("verifyVersions") { .collect { 
Version.fromString(it) } ) } - verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible)) + verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)) verifyCiYaml(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) // Make sure backport bot config file is up to date diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml index 58477c68f0e7c..a5bfcd5b0882e 100644 --- a/docs/changelog/111972.yaml +++ b/docs/changelog/111972.yaml @@ -5,11 +5,13 @@ type: feature issues: [] highlight: title: Add global retention in data stream lifecycle - body: "Data stream lifecycle now supports configuring retention on a cluster level,\ - \ namely global retention. Global retention \nallows us to configure two different\ - \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\ - \ data streams managed by the data stream lifecycle that do not have retention\n\ - defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\ - \ applied to all data streams managed by the data stream lifecycle and it allows\ - \ any data stream \ndata to be deleted after the `max_retention` has passed." + body: |- + Data stream lifecycle now supports configuring retention on a cluster level, + namely global retention. Global retention \nallows us to configure two different + retentions: + + - `data_streams.lifecycle.retention.default` is applied to all data streams managed + by the data stream lifecycle that do not have retention defined on the data stream level. + - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the + data stream lifecycle and it allows any data stream \ndata to be deleted after the `max_retention` has passed. 
notable: true diff --git a/docs/changelog/112348.yaml b/docs/changelog/112348.yaml new file mode 100644 index 0000000000000..84110a7cd4f1b --- /dev/null +++ b/docs/changelog/112348.yaml @@ -0,0 +1,6 @@ +pr: 112348 +summary: Introduce repository integrity verification API +area: Snapshot/Restore +type: enhancement +issues: + - 52622 diff --git a/docs/changelog/112451.yaml b/docs/changelog/112451.yaml new file mode 100644 index 0000000000000..aa852cf5e2a1a --- /dev/null +++ b/docs/changelog/112451.yaml @@ -0,0 +1,29 @@ +pr: 112451 +summary: Update data stream lifecycle telemetry to track global retention +area: Data streams +type: breaking +issues: [] +breaking: + title: Update data stream lifecycle telemetry to track global retention + area: REST API + details: |- + In this release we introduced global retention settings that fulfil the following criteria: + + - a data stream managed by the data stream lifecycle, + - a data stream that is not an internal data stream. + + As a result, we defined different types of retention: + + - **data retention**: the retention configured on data stream level by the data stream user or owner + - **default global retention:** the retention configured by an admin on a cluster level and applied to any + data stream that doesn't have data retention and fulfils the criteria. + - **max global retention:** the retention configured by an admin to guard against having long retention periods. + Any data stream that fulfills the criteria will adhere to the data retention unless it exceeds the max retention, + in which case the max global retention applies. + - **effective retention:** the retention that applies on the data stream that fulfill the criteria at a given moment + in time. It takes into consideration all the retention above and resolves it to the retention that will take effect. + + Considering the above changes, having a field named `retention` in the usage API was confusing. 
For this reason, we + renamed it to `data_retention` and added telemetry about the other configurations too. + impact: Users that use the field `data_lifecycle.retention` should use the `data_lifecycle.data_retention` + notable: false diff --git a/docs/changelog/112610.yaml b/docs/changelog/112610.yaml new file mode 100644 index 0000000000000..3d67a80a8f0b3 --- /dev/null +++ b/docs/changelog/112610.yaml @@ -0,0 +1,6 @@ +pr: 112610 +summary: Support widening of numeric types in union-types +area: ES|QL +type: bug +issues: + - 111277 diff --git a/docs/changelog/112687.yaml b/docs/changelog/112687.yaml new file mode 100644 index 0000000000000..dd079e1b700c4 --- /dev/null +++ b/docs/changelog/112687.yaml @@ -0,0 +1,5 @@ +pr: 112687 +summary: Add `TaskManager` to `pluginServices` +area: Infra/Metrics +type: enhancement +issues: [] diff --git a/docs/changelog/112703.yaml b/docs/changelog/112703.yaml new file mode 100644 index 0000000000000..a428e8c4e2339 --- /dev/null +++ b/docs/changelog/112703.yaml @@ -0,0 +1,5 @@ +pr: 112703 +summary: JSON parse failures should be 4xx codes +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/112720.yaml b/docs/changelog/112720.yaml new file mode 100644 index 0000000000000..a44ea5a699520 --- /dev/null +++ b/docs/changelog/112720.yaml @@ -0,0 +1,5 @@ +pr: 112720 +summary: Fix NPE in `dense_vector` stats +area: Vector Search +type: bug +issues: [] diff --git a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc b/docs/reference/quickstart/run-elasticsearch-locally.asciidoc index 8c75510ae860f..24e0f3f22350e 100644 --- a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc +++ b/docs/reference/quickstart/run-elasticsearch-locally.asciidoc @@ -114,6 +114,8 @@ docker run -p 127.0.0.1:5601:5601 -d --name kibana --network elastic-net \ {kib-docker-image} ---- +When you access {kib}, use `elastic` as the username and the password you set earlier for the `ELASTIC_PASSWORD` environment variable. 
+ [NOTE] ==== The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to basic instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page]. diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index e10240a66fbb9..a54dbe21b46c6 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -377,10 +377,19 @@ GET /_xpack/usage "enabled": true, "count": 0, "default_rollover_used": true, - "retention": { - "minimum_millis": 0, - "maximum_millis": 0, - "average_millis": 0.0 + "data_retention": { + "configured_data_streams": 0 + }, + "effective_retention": { + "retained_data_streams": 0 + }, + "global_retention": { + "default": { + "defined": false + }, + "max": { + "defined": false + } } }, "data_tiers" : { diff --git a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc index 6cdf65ba54e7e..b8bb6a2cd7d13 100644 --- a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc +++ b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc @@ -28,6 +28,7 @@ For more information, see <>. 
include::put-repo-api.asciidoc[] include::verify-repo-api.asciidoc[] include::repo-analysis-api.asciidoc[] +include::verify-repo-integrity-api.asciidoc[] include::get-repo-api.asciidoc[] include::delete-repo-api.asciidoc[] include::clean-up-repo-api.asciidoc[] diff --git a/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc new file mode 100644 index 0000000000000..99ae126b401f5 --- /dev/null +++ b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc @@ -0,0 +1,232 @@ +[role="xpack"] +[[verify-repo-integrity-api]] +=== Verify repository integrity API +++++ +Verify repository integrity +++++ + +Verifies the integrity of the contents of a snapshot repository. + +//// +[source,console] +---- +PUT /_snapshot/my_repository +{ + "type": "fs", + "settings": { + "location": "my_backup_location" + } +} +---- +// TESTSETUP +//// + +[source,console] +---- +POST /_snapshot/my_repository/_verify_integrity +---- + +[[verify-repo-integrity-api-request]] +==== {api-request-title} + +`POST /_snapshot//_verify_integrity` + +[[verify-repo-integrity-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `manage` +<> to use this API. For more +information, see <>. + +[[verify-repo-integrity-api-desc]] +==== {api-description-title} + +This API allows you to perform a comprehensive check of the contents of a +repository, looking for any anomalies in its data or metadata which might +prevent you from restoring snapshots from the repository or which might cause +future snapshot create or delete operations to fail. + +If you suspect the integrity of the contents of one of your snapshot +repositories, cease all write activity to this repository immediately, set its +`read_only` option to `true`, and use this API to verify its integrity. Until +you do so: + +* It may not be possible to <> from this repository. 
+ +* <> may report errors when searched, or may have + unassigned shards. + +* <> into this repository may fail, + or may appear to succeed having created a snapshot which cannot be restored. + +* <> from this repository may fail, or + may appear to succeed leaving the underlying data on disk. + +* Continuing to write to the repository while it is in an invalid state may + causing additional damage to its contents. + +If the <> API finds any problems with the integrity +of the contents of your repository, {es} will not be able to repair the damage. +The only way to bring the repository back into a fully working state after its +contents have been damaged is by restoring its contents from a +<> which was taken before the +damage occurred. You must also identify what caused the damage and take action +to prevent it from happening again. + +If you cannot restore a repository backup, +<> and use this for +all future snapshot operations. In some cases it may be possible to recover +some of the contents of a damaged repository, either by +<> as many of its snapshots as needed and +<> of the restored data, or by +using the <> API to copy data from any <> +mounted from the damaged repository. + +Avoid all operations which write to the repository while the +<> API is running. If something changes the +repository contents while an integrity verification is running then {es} may +incorrectly report having detected some anomalies in its contents due to the +concurrent writes. It may also incorrectly fail to report some anomalies that +the concurrent writes prevented it from detecting. + +NOTE: This API is intended for exploratory use by humans. You should expect the +request parameters and the response format to vary in future versions. + +NOTE: This API may not work correctly in a mixed-version cluster. + +[[verify-repo-integrity-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +Name of the snapshot repository whose integrity to verify. 
+ +[[verify-repo-integrity-api-query-params]] +==== {api-query-parms-title} + +The default values for the parameters of this API are designed to limit the +impact of the integrity verification on other activities in your cluster. For +instance, by default it will only use at most half of the `snapshot_meta` +threads to verify the integrity of each snapshot, allowing other snapshot +operations to use the other half of this thread pool. + +If you modify these parameters to speed up the verification process, you risk +disrupting other snapshot-related operations in your cluster. For large +repositories, consider setting up a separate single-node {es} cluster just for +running the integrity verification API. + +`snapshot_verification_concurrency`:: +(Optional, integer) Specifies the number of snapshots to verify concurrently. +Defaults to `0` which means to use at most half of the `snapshot_meta` thread +pool at once. + +`index_verification_concurrency`:: +(Optional, integer) Specifies the number of indices to verify concurrently. +Defaults to `0` which means to use the entire `snapshot_meta` thread pool. + +`meta_thread_pool_concurrency`:: +(Optional, integer) Specifies the maximum number of snapshot metadata +operations to execute concurrently. Defaults to `0` which means to use at most +half of the `snapshot_meta` thread pool at once. + +`index_snapshot_verification_concurrency`:: +(Optional, integer) Specifies the maximum number of index snapshots to verify +concurrently within each index verification. Defaults to `1`. + +`max_failed_shard_snapshots`:: +(Optional, integer) Limits the number of shard snapshot failures to track +during integrity verification, in order to avoid excessive resource usage. If +your repository contains more than this number of shard snapshot failures then +the verification will fail. Defaults to `10000`. + +`verify_blob_contents`:: +(Optional, boolean) Specifies whether to verify the checksum of every data blob +in the repository. 
Defaults to `false`. If this feature is enabled, {es} will +read the entire repository contents, which may be extremely slow and expensive. + +`blob_thread_pool_concurrency`:: +(Optional, integer) If `?verify_blob_contents` is `true`, this parameter +specifies how many blobs to verify at once. Defaults to `1`. + +`max_bytes_per_sec`:: +(Optional, <>) +If `?verify_blob_contents` is `true`, this parameter specifies the maximum +amount of data that {es} will read from the repository every second. Defaults +to `10mb`. + +[role="child_attributes"] +[[verify-repo-integrity-api-response-body]] +==== {api-response-body-title} + +The response exposes implementation details of the analysis which may change +from version to version. The response body format is therefore not considered +stable and may be different in newer versions. + +`log`:: +(array) A sequence of objects that report the progress of the analysis. ++ +.Properties of `log` +[%collapsible%open] +==== +`timestamp_in_millis`:: +(integer) The timestamp of this log entry, represented as the number of +milliseconds since the {wikipedia}/Unix_time[Unix epoch]. + +`timestamp`:: +(string) The timestamp of this log entry, represented as a string formatted +according to {wikipedia}/ISO_8601[ISO 8601]. Only included if the +<> flag is set. + +`snapshot`:: +(object) If the log entry pertains to a particular snapshot then the snapshot +will be described in this object. + +`index`:: +(object) If the log entry pertains to a particular index then the index will be +described in this object. + +`snapshot_restorability`:: +(object) If the log entry pertains to the restorability of an index then the +details will be described in this object. + +`anomaly`:: +(string) If the log entry pertains to an anomaly in the repository contents then +this string will describe the anomaly. 
+ +`exception`:: +(object) If the log entry pertains to an exception that {es} encountered during +the verification then the details will be included in this object. + +==== + +`results`:: +(object) An object which describes the final results of the analysis. ++ +.Properties of `results` +[%collapsible%open] +==== +`status`:: +(object) The final status of the analysis task. + +`final_repository_generation`:: +(integer) The repository generation at the end of the analysis. If there were +any writes to the repository during the analysis then this value will be +different from the `generation` reported in the task status, and the analysis +may have detected spurious anomalies due to the concurrent writes, or may even +have failed to detect some anomalies in the repository contents. + +`total_anomalies`:: +(integer) The total number of anomalies detected during the analysis. + +`result`:: +(string) The final result of the analysis. If the repository contents appear to +be intact then this will be the string `pass`. If this field is missing, or +contains some other value, then the repository contents were not fully +verified. + +==== + +`exception`:: +(object) If the analysis encountered an exception which prevented it from +completing successfully then this exception will be reported here. diff --git a/docs/reference/snapshot-restore/register-repository.asciidoc b/docs/reference/snapshot-restore/register-repository.asciidoc index 28b0640a8fae5..2147ad3c684f3 100644 --- a/docs/reference/snapshot-restore/register-repository.asciidoc +++ b/docs/reference/snapshot-restore/register-repository.asciidoc @@ -272,7 +272,9 @@ filesystem snapshot of this repository. When restoring a repository from a backup, you must not register the repository with {es} until the repository contents are fully restored. If you alter the contents of a repository while it is registered with {es} then the repository -may become unreadable or may silently lose some of its contents. 
+may become unreadable or may silently lose some of its contents. After +restoring a repository from a backup, use the <> API +to verify its integrity before you start to use the repository. include::repository-azure.asciidoc[] include::repository-gcs.asciidoc[] diff --git a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java index 74acb00925e5a..e0c18f35f6cb0 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java @@ -17,13 +17,16 @@ */ public enum RestApiVersion { + V_9(9), + V_8(8), - @UpdateForV9 // v9 will not need to support the v7 REST API V_7(7); public final byte major; + @UpdateForV9 + // We need to bump current and previous to V_9 and V_8, respectively private static final RestApiVersion CURRENT = V_8; private static final RestApiVersion PREVIOUS = V_7; @@ -49,6 +52,7 @@ public static RestApiVersion minimumSupported() { public static Predicate equalTo(RestApiVersion restApiVersion) { return switch (restApiVersion) { + case V_9 -> r -> r.major == V_9.major; case V_8 -> r -> r.major == V_8.major; case V_7 -> r -> r.major == V_7.major; }; @@ -56,11 +60,14 @@ public static Predicate equalTo(RestApiVersion restApiVersion) { public static Predicate onOrAfter(RestApiVersion restApiVersion) { return switch (restApiVersion) { + case V_9 -> r -> r.major >= V_9.major; case V_8 -> r -> r.major >= V_8.major; case V_7 -> r -> r.major >= V_7.major; }; } + @UpdateForV9 + // Right now we return api version 8 for major version 9 until we bump the api version above public static RestApiVersion forMajor(int major) { switch (major) { case 7 -> { @@ -69,6 +76,9 @@ public static RestApiVersion forMajor(int major) { case 8 -> { return V_8; } + case 9 -> { + return V_8; + } default -> throw new IllegalArgumentException("Unknown REST API version " + major); } } diff --git 
a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java index c59f003d9cb04..63191084ca837 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java @@ -111,7 +111,7 @@ public String text() throws IOException { } private void throwOnNoText() { - throw new IllegalStateException("Can't get text on a " + currentToken() + " at " + getTokenLocation()); + throw new IllegalArgumentException("Expected text at " + getTokenLocation() + " but found " + currentToken()); } @Override diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index 91f3303d9d4a8..5e233f423aa14 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -36,30 +36,6 @@ if (BuildParams.isSnapshotBuild() == false) { } } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("search.aggregation/20_terms/string profiler via global ordinals filters implementation", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/20_terms/string profiler via global ordinals native implementation", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/20_terms/string profiler via map", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/20_terms/numeric profiler", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/210_top_hits_nested_metric/top_hits aggregation with sequence numbers", "#42809 the use nested path and filter sort throws an exception") - task.skipTest("search.aggregation/370_doc_count_field/Test filters agg with doc_count", "Uses profiler for assertions which is not backwards 
compatible") - - // In 8.9.0, the default t-digest algorithm changed from AVL-tree-based to hybrid, combining a sorted array of samples with a merging - // implementation. This change leads to slight different percentile results, compared to previous versions. - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Basic test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Non-keyed test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Only aggs test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Explicit Percents test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Metadata test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Filtered test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/420_percentile_ranks_tdigest_metric/filtered", "Hybrid t-digest produces different results.") - - // Something has changed with response codes - task.skipTest("search.aggregation/20_terms/IP test", "Hybrid t-digest produces different results.") - - task.addAllowedWarningRegex("\\[types removal\\].*") -} - artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 1fc42a1b294fe..b43124f52552b 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -29,13 +29,6 @@ dependencies { clusterModules project(':modules:mapper-extras') } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("indices.analyze/10_analyze/htmlStrip_deprecated", "Cleanup versioned deprecations in analysis #41560") - 
task.skipTest("analysis-common/40_token_filters/delimited_payload_filter_error", "Remove preconfigured delimited_payload_filter #43686") - task.skipTest("analysis-common/20_analyzers/standard_html_strip", "Cleanup versioned deprecations in analysis #41560") - task.skipTest("search.query/50_queries_with_synonyms/Test common terms query with stacked tokens", "#42654 - `common` query throws an exception") -} - artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index c18cb3dddf0ae..4c8e88a0cedbf 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -52,25 +52,6 @@ public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException ex.getMessage() ); } - - final Settings settingsPre7 = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) - ) - .put("index.analysis.analyzer.custom_analyzer.type", "custom") - .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") - .put("index.analysis.filter.my_ngram.type", "nGram") - .build(); - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); - assertWarnings( - "The [nGram] token filter name is deprecated and will be removed in a future version. 
" - + "Please change the filter name to [ngram] instead." - ); - } } /** @@ -101,26 +82,6 @@ public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOExcep ex.getMessage() ); } - - final Settings settingsPre7 = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) - ) - .put("index.analysis.analyzer.custom_analyzer.type", "custom") - .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") - .put("index.analysis.filter.my_ngram.type", "edgeNGram") - .build(); - - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); - assertWarnings( - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [edge_ngram] instead." 
- ); - } } /** @@ -128,39 +89,6 @@ public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOExcep * disallow usages for indices created after 8.0 */ public void testNGramTokenizerDeprecation() throws IOException { - // tests for prebuilt tokenizer - doTestPrebuiltTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestPrebuiltTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestPrebuiltTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); - doTestPrebuiltTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); expectThrows( IllegalArgumentException.class, () -> doTestPrebuiltTokenizerDeprecation( @@ -179,40 +107,6 @@ public void testNGramTokenizerDeprecation() throws IOException { true ) ); - - // same batch of tests for custom tokenizer definition in the settings - doTestCustomTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestCustomTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestCustomTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - 
true - ); - doTestCustomTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); expectThrows( IllegalArgumentException.class, () -> doTestCustomTokenizerDeprecation( diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java index 412e3ba3e380a..48bc60b5ad0b4 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -17,14 +17,12 @@ import org.elasticsearch.index.IndexService.IndexCreationContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -47,61 +45,10 @@ private IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) th } public void testPreConfiguredTokenizer() throws IOException { - - // Before 7.3 we return ngrams of length 1 only - { - IndexVersion version = IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0) - ); - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) { - NamedAnalyzer 
analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t" }); - } - } - - // Check deprecated name as well - { - IndexVersion version = IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0) - ); - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t" }); - } - } - - // Afterwards, we return ngrams of length 1 and 2, to match the default factory settings - { - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(IndexVersion.current(), "edge_ngram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); - } - } - - // Check deprecated name as well, needs version before 8.0 because throws IAE after that - { - try ( - IndexAnalyzers indexAnalyzers = buildAnalyzers( - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_3_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0) - ), - "edgeNGram" - ) - ) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); - - } + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(IndexVersion.current(), "edge_ngram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 7a2bd2a822988..16288c754e922 100644 --- 
a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -337,7 +337,7 @@ public void testShingleFilters() { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") @@ -391,7 +391,7 @@ public void testPreconfiguredTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); @@ -423,7 +423,7 @@ public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .putList("common_words", "a", "b") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index 68e6d6661f944..39fda06363033 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ 
b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.index.IndexService.IndexCreationContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalysisTestsHelper; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -25,7 +24,6 @@ import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -180,61 +178,26 @@ public void testIgnoreKeywords() throws IOException { } public void testPreconfiguredFilter() throws IOException { - // Before 7.3 we don't adjust offsets - { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - Settings indexSettings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0) - ) - ) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - - try ( - IndexAnalyzers indexAnalyzers = new AnalysisModule( - TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()), - new StablePluginsRegistry() - ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings) - ) { - - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - 
assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 0 }, new int[] { 4, 4 }); - - } - } - - // Afger 7.3 we do adjust offsets - { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - Settings indexSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - try ( - IndexAnalyzers indexAnalyzers = new AnalysisModule( - TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()), - new StablePluginsRegistry() - ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings) - ) { + try ( + IndexAnalyzers indexAnalyzers = new AnalysisModule( + TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings) + ) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 1 }, new int[] { 1, 4 }); + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + 
assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 1 }, new int[] { 1, 4 }); - } } } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 99d4f8bb7cd28..d8f8ae9d080a7 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -561,7 +561,13 @@ private Set waitForInProgressOrTriggerDownsampling( * Issues a request downsample the source index to the downsample index for the specified round. */ private void downsampleIndexOnce(DataStreamLifecycle.Downsampling.Round round, String sourceIndex, String downsampleIndexName) { - DownsampleAction.Request request = new DownsampleAction.Request(sourceIndex, downsampleIndexName, null, round.config()); + DownsampleAction.Request request = new DownsampleAction.Request( + TimeValue.THIRTY_SECONDS /* TODO should this be longer/configurable? 
*/, + sourceIndex, + downsampleIndexName, + null, + round.config() + ); transportActionsDeduplicator.executeOnce( request, new ErrorRecordingActionListener( diff --git a/modules/health-shards-availability/build.gradle b/modules/health-shards-availability/build.gradle index 6c7cf5a19c8ac..b98824d84af94 100644 --- a/modules/health-shards-availability/build.gradle +++ b/modules/health-shards-availability/build.gradle @@ -19,7 +19,3 @@ restResources { include '_common', 'indices', 'index', 'cluster', 'nodes', 'get', 'ingest' } } - -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.addAllowedWarningRegex("setting \\[ecs\\] is deprecated as ECS format is the default and only option") -} diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index 89f0b530713c6..f708448c10d7a 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -138,14 +138,6 @@ tasks.named("forbiddenPatterns").configure { exclude '**/text-cjk-*.txt' } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - // 2 new tika metadata fields are returned in v8 - task.replaceValueInLength("_source.attachment", 8, "Test ingest attachment processor with .doc file") - task.replaceValueInLength("_source.attachment", 8, "Test ingest attachment processor with .docx file") - // Tika 2.4.0 adds an extra newline for each embedded attachment, making the content_length larger - task.replaceValueInMatch("_source.attachment.content_length", 20, "Test ingest attachment processor with .docx file") -} - tasks.named("thirdPartyAudit").configure { ignoreMissingClasses() } @@ -153,5 +145,5 @@ tasks.named("thirdPartyAudit").configure { if (BuildParams.inFipsJvm) { tasks.named("test").configure { enabled = false } tasks.named("yamlRestTest").configure { enabled = false }; - tasks.named("yamlRestTestV7CompatTest").configure { enabled = false }; + tasks.named("yamlRestCompatTest").configure { enabled = false }; } diff 
--git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index d7100745680ba..ee923132aa6a6 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -49,7 +49,3 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.commons.logging.LogFactory', ) } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.addAllowedWarningRegex("\\[types removal\\].*") -} diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index bc5bb165cd0d2..64a679581f76d 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -84,11 +84,6 @@ tasks.named("dependencyLicenses").configure { ignoreFile 'elastic-geoip-database-service-agreement-LICENSE.txt' } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTestsByFilePattern("**/ingest_geoip/20_geoip_processor.yml", "from 8.0 yaml rest tests use geoip test fixture and default geoip are no longer packaged. 
In 7.x yaml tests used default databases which makes tests results very different, so skipping these tests") - // task.skipTest("lang_mustache/50_multi_search_template/Multi-search template with errors", "xxx") -} - artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } diff --git a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle index a97664923438b..e930b4ca38233 100644 --- a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle +++ b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle @@ -20,12 +20,10 @@ dependencies { javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"), "javaRestTest")) } -assert Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() == 8 : - "If we are targeting a branch other than 8, we should enable migration tests" // once we are ready to test migrations from 8.x to 9.x, we can set the compatible version to 8.0.0 // see https://github.com/elastic/elasticsearch/pull/93666 -BuildParams.bwcVersions.withWireCompatible(v -> v.before("7.0.0")) { bwcVersion, baseName -> +BuildParams.bwcVersions.withWireCompatible(v -> v.before("9.0.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java b/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java index 4f8abf4b82390..b4d1788688119 100644 --- a/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java +++ b/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java @@ 
-12,7 +12,9 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -31,6 +33,8 @@ import static org.hamcrest.Matchers.contains; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "we need to figure out the index migrations here for 9.0") public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { private static final boolean useFixture = Boolean.getBoolean("geoip_use_service") == false; diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index 64cd38c584820..d124770e33cce 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -18,7 +18,3 @@ restResources { include '_common', 'indices', 'index', 'cluster', 'nodes', 'get', 'ingest' } } - -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.addAllowedWarningRegex("setting \\[ecs\\] is deprecated as ECS format is the default and only option") -} diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index c36275699e21f..3cbcabed20a98 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -26,7 +26,3 @@ restResources { } } -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.addAllowedWarningRegex("\\[types removal\\].*") - task.replaceValueInMatch("responses.1.error.root_cause.0.type", "x_content_e_o_f_exception", "Multi-search template with errors") -} diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index cc557ac2289f6..0b2882934a122 100644 --- a/modules/lang-painless/build.gradle +++ 
b/modules/lang-painless/build.gradle @@ -61,48 +61,6 @@ tasks.named("test").configure { jvmArgs '-XX:-OmitStackTraceInFastThrow', '-XX:-HeapDumpOnOutOfMemoryError' } -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'painless/20_scriptfield/Scripted Field Doing Compare (fields api)', - 'painless/70_execute_painless_scripts/Execute with double field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with double field context (single-value, fields api)', - '70_execute_painless_scripts/Execute with geo point field context (multi-value, fields api)', - '70_execute_painless_scripts/Execute with ip field context (single-value, fields api)', - '70_execute_painless_scripts/Execute with boolean field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with boolean field context (multi-value, fields api)', - 'painless/40_fields_api/date to long', - 'painless/130_metric_agg/Scripted Metric Agg Total (fields api)', - 'painless/70_execute_painless_scripts/Execute with keyword field context (multi-value, fields api)', - 'painless/100_terms_agg/Double Value Script with doc notation (fields api)', - 'painless/100_terms_agg/Long Value Script with doc notation (fields api)', - 'painless/20_scriptfield/Access a date (fields api)', - 'painless/70_execute_painless_scripts/Execute with date field context (multi-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with keyword field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with long field context (multi-value, fields api)', - 'painless/20_scriptfield/Scripted Field (fields api)', - 'painless/70_execute_painless_scripts/Execute with long field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with geo point field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with date field context 
(single-value, fields api)', - 'painless/40_fields_api/missing field', - 'painless/40_fields_api/sort script fields api', - 'painless/20_scriptfield/Access many dates (fields api)', - 'painless/70_execute_painless_scripts/Execute with long field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with geo point field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with date field context (single-value, fields api)', - 'painless/40_fields_api/missing field', - 'painless/40_fields_api/sort script fields api', - 'painless/20_scriptfield/Access many dates (fields api)', - 'painless/100_terms_agg/String Value Script with doc notation (fields api)', - 'painless/40_fields_api/string to long and bigint', - 'painless/40_fields_api/boolean to long and bigint', - 'painless/40_fields_api/script fields api for dates', - 'painless/70_execute_painless_scripts/Execute with double field context (multi-value, fields api)', - 'painless/40_fields_api/filter script fields api', - 'painless/40_fields_api/script score fields api', - 'painless/70_mov_fn_agg/*' // Agg moved to a module. 
- ].join(',') -} - esplugin.bundleSpec.into("spi") { from(configurations.spi) } diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 4fa1d7b7a3108..0e9d7ca5f15c8 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; @@ -342,6 +343,8 @@ public void testParsePolygon() throws IOException, ParseException { assertGeometryEquals(p, polygonGeoJson, false); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParse3DPolygon() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java index 6e8a61277cccf..74340e705b578 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Line; import 
org.elasticsearch.geometry.MultiLine; @@ -301,6 +302,8 @@ public void testParseMixedDimensionPolyWithHole() throws IOException, ParseExcep assertThat(e, hasToString(containsString("coordinate dimensions do not match"))); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); @@ -334,6 +337,8 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParsePolyWithStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0, 0)); @@ -357,6 +362,8 @@ public void testParsePolyWithStoredZ() throws IOException { assertEquals(shapeBuilder.numDimensions(), 3); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseOpenPolygon() throws IOException { String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))"; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java index 0a0bb12bedbae..407f372bee26a 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import 
org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; +import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; @@ -19,6 +20,7 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -53,6 +55,8 @@ import static org.mockito.Mockito.when; @SuppressWarnings("deprecation") +@UpdateForV9 +@AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldMapperTests extends MapperTestCase { @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java index dc74b9cd295ce..a64352c5306e1 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java @@ -7,7 +7,9 @@ */ package org.elasticsearch.legacygeo.mapper; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.FieldTypeTestCase; @@ -20,6 +22,8 @@ import java.util.List; import java.util.Map; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class 
LegacyGeoShapeFieldTypeTests extends FieldTypeTestCase { /** diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 844478c83e7c7..3a1d8a396c4be 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -19,7 +19,3 @@ restResources { include '_common', 'bulk', 'cluster', 'get', 'nodes', 'indices', 'index', 'search' } } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("/30_inner_hits/profile fetch", "profile output has changed") -} diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index b9b257a42e051..041fbb8bce340 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -23,7 +23,3 @@ restResources { include '_common', 'get', 'indices', 'index', 'search', 'msearch' } } - -tasks.named("yamlRestTestV7CompatTransform").configure{ task -> - task.addAllowedWarningRegex("\\[types removal\\].*") -} diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 9cd7963224cf8..9e1e1e842ba58 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -160,30 +160,3 @@ if (OS.current() == OS.WINDOWS) { } } } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("reindex/20_validation/reindex without source gives useful error message", "exception with a type. Not much benefit adding _doc there.") - task.skipTest("update_by_query/20_validation/update_by_query without source gives useful error message", "exception with a type. Not much benefit adding _doc there.") - - // these tests are all relying on a call to refresh all indices, when they could easily be changed - // in 7.x to call the specific index they want to refresh. 
- // See https://github.com/elastic/elasticsearch/issues/81188 - task.skipTest("delete_by_query/70_throttle/Rethrottle to -1 which turns off throttling", "test relies on system index being non-hidden") - task.skipTest("delete_by_query/80_slices/Multiple slices with rethrottle", "test relies on system index being non-hidden") - task.skipTest("delete_by_query/80_slices/Multiple slices with wait_for_completion=false", "test relies on system index being non-hidden") - task.skipTest("reindex/80_slices/Multiple slices with rethrottle", "test relies on system index being non-hidden") - task.skipTest("reindex/80_slices/Multiple slices with wait_for_completion=false", "test relies on system index being non-hidden") - task.skipTest("update_by_query/70_slices/Multiple slices with rethrottle", "test relies on system index being non-hidden") - task.skipTest("update_by_query/70_slices/Multiple slices with wait_for_completion=false", "test relies on system index being non-hidden") - - task.addAllowedWarningRegex("\\[types removal\\].*") -} - -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'update_by_query/80_scripting/Can\'t change _id', - 'update_by_query/80_scripting/Set unsupported operation type', - 'update_by_query/80_scripting/Setting bogus context is an error', - - ].join(',') -} diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java index ebe4b1835b103..f02695c63a7e7 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -264,12 +264,12 @@ public void testMultipleFeatureMigration() throws Exception { .setAliasName(".second-internal-managed-alias") .setPrimaryIndex(".second-int-man-old") 
.setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) - .setSettings(createSettings(IndexVersions.V_7_0_0, 0)) + .setSettings(createSettings(IndexVersions.MINIMUM_COMPATIBLE, 0)) .setMappings(createMapping(true, true)) .setOrigin(ORIGIN) .setVersionMetaKey(VERSION_META_KEY) .setAllowedElasticProductOrigins(Collections.emptyList()) - .setMinimumNodeVersion(Version.V_7_0_0) + .setMinimumNodeVersion(Version.CURRENT.minimumCompatibilityVersion()) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 3fe2f9d9bae42..3537d430e212b 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -33,11 +33,6 @@ dependencies { internalClusterTestImplementation project(':test:fixtures:url-fixture') } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("repository_url/10_basic/Restore with repository-url using file://", "Error message has changed") - task.skipTest("repository_url/10_basic/Restore with repository-url using http://", "Error message has changed") -} - tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( 'javax.servlet.ServletContextEvent', diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index 5a2d268cf7a4e..f9485b6ed3027 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -21,10 +21,3 @@ dependencies { api project(':libs:elasticsearch-grok') api project(':libs:elasticsearch-dissect') } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("runtime_fields/100_geo_point/fetch fields from source", "Format changed. Old format was a bug.") - task.skipTest("runtime_fields/101_geo_point_from_source/fetch fields from source", "Format changed. 
Old format was a bug.") - task.skipTest("runtime_fields/102_geo_point_source_in_query/fetch fields from source", "Format changed. Old format was a bug.") - task.skipTest("runtime_fields/103_geo_point_calculated_at_index/fetch fields from source", "Format changed. Old format was a bug.") -} diff --git a/muted-tests.yml b/muted-tests.yml index 589e49645c986..ac35776db665f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -154,9 +154,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112471 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 -- class: org.elasticsearch.script.mustache.LangMustacheClientYamlTestSuiteIT - method: test {yaml=lang_mustache/50_multi_search_template/Multi-search template with errors} - issue: https://github.com/elastic/elasticsearch/issues/112580 +- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT + method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} + issue: https://github.com/elastic/elasticsearch/issues/112575 - class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests method: testClientServiceMutualAuthentication issue: https://github.com/elastic/elasticsearch/issues/112529 @@ -199,6 +199,15 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDelete_multipleRequest issue: https://github.com/elastic/elasticsearch/issues/112701 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testCreateJobInSharedIndexUpdatesMapping + issue: https://github.com/elastic/elasticsearch/issues/112729 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testGetJob_GivenNoSuchJob + issue: https://github.com/elastic/elasticsearch/issues/112730 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/cluster/stats/line_1450} + issue: 
https://github.com/elastic/elasticsearch/issues/112732 # Examples: # diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 1c7db6d040be5..eed88b3232a45 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -43,7 +43,3 @@ tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("analysis_icu/10_basic/Normalization with deprecated unicodeSetFilter", "Cleanup versioned deprecations in analysis #41560") -} - diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java index 348e9f5fae7c8..c83d8b789611f 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java @@ -43,7 +43,7 @@ public void testDisallowedWithSynonyms() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index c57112d0455c6..120b6bdf3288c 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ 
b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -36,7 +36,8 @@ @TestCaseOrdering(FullClusterRestartTestOrdering.class) public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { - private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); + + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString(System.getProperty("tests.minimum.wire.compatible")); private static final String OLD_CLUSTER_VERSION = System.getProperty("tests.old_cluster_version"); private static IndexVersion oldIndexVersion; private static boolean upgradeFailed = false; diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index fe2236adc4904..b0025302701af 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -419,7 +419,7 @@ public void testRecoveryClosedIndex() throws Exception { } final IndexVersion indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(IndexVersions.V_7_2_0)) { + if (indexVersionCreated.onOrAfter(IndexVersions.V_8_0_0)) { // index was created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated ensureGreen(indexName); @@ -448,7 +448,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)) { + if (minimumIndexVersion().onOrAfter(IndexVersions.V_8_0_0)) { // index is created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated ensureGreen(indexName); @@ -483,9 +483,9 @@ public void 
testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_7_2_0)) { + if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_8_0_0)) { // index was created on a version that supports the replication of closed indices, so we expect it to be closed and replicated - assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)); + assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_8_0_0)); ensureGreen(indexName); assertClosedIndex(indexName, true); if (CLUSTER_TYPE != ClusterType.OLD) { diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 089b7470e9a97..015c9c4b812c6 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -41,194 +41,6 @@ dependencies { clusterModules project(':modules:data-streams') } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - - task.skipTestsByFilePattern("**/cat*/*.yml", "Cat API are meant to be consumed by humans, so will not be supported by Compatible REST API") - task.skipTestsByFilePattern("**/indices.upgrade/*.yml", "upgrade api will only get a dummy endpoint returning an exception suggesting to use _reindex") - task.skipTestsByFilePattern("**/indices.stats/60_field_usage/*/*.yml", "field usage results will be different between lucene versions") - task.skipTestsByFilePattern("**/search.aggregation/*.yml", "run by the aggregation module") - - task.skipTest("bulk/11_dynamic_templates/Dynamic templates", "Error message has changed") - task.skipTest("index/80_date_nanos/date_nanos requires dates after 1970 and before 2262", "Error message has changed") - task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typed index while there is a typeless template", "Type information about the type is removed and not passed down. 
The logic to check for this is also removed.") - task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typeless index while there is a typed template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") - task.skipTest("delete/70_mix_typeless_typeful/DELETE with typeless API on an index that has types", "Type information about the type is removed and not passed down. The logic to check for this is also removed."); - task.skipTest("get/100_mix_typeless_typeful/GET with typeless API on an index that has types", "Failing due to not recognising missing type (the type path param is ignored, will no be fixed"); - task.skipTest("indices.get_field_mapping/21_missing_field_with_types/Return empty object if field doesn't exist, but type and index do", "This test returns test_index.mappings:{} when {} was expected. difference between 20_missing_field and 21_missing_field_with_types?") - task.skipTest("indices.get_field_mapping/30_missing_type/Raise 404 when type doesn't exist", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/Existent and non-existent type returns 404 and the existing type", " The information about the type is not present in the index. hence it cannot know if the type exist or not") - task.skipTest("indices.get_mapping/20_missing_type/Existent and non-existent types returns 404 and the existing type", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/No type matching pattern returns 404", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/Non-existent type returns 404", "The information about the type is not present in the index. 
hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/Type missing when no types exist", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.put_mapping/20_mix_typeless_typeful/PUT mapping with _doc on an index that has types", "The information about the type is not present in the index. hence it cannot know if the type was already used or not") - task.skipTest("indices.put_mapping/20_mix_typeless_typeful/PUT mapping with typeless API on an index that has types", "The information about the type is not present in the index. hence it cannot know if the type was already used or not") - task.skipTest("search/160_exists_query/Test exists query on _type field", "There is a small distinction between empty mappings and no mappings at all. The code to implement this test was refactored #54003; field search on _type field- not implementing. The data for _type is considered incorrect in this search") - task.skipTest("termvectors/50_mix_typeless_typeful/Term vectors with typeless API on an index that has types", "type information is not stored, hence the the index will be found") - task.skipTest("mget/11_default_index_type/Default index/type", "mget - these use cases are no longer valid because we always default to _doc.; This mean test cases where there is assertion on not finding by type won't work") - task.skipTest("mget/16_basic_with_types/Basic multi-get", "mget - these use cases are no longer valid, because we always default to _doc.; This mean test cases where there is assertion on not finding by type won't work") - task.skipTest("explain/40_mix_typeless_typeful/Explain with typeless API on an index that has types", "asserting about type not found won't work as we ignore the type information") - task.skipTest("indices.stats/20_translog/Translog retention settings are deprecated", "translog settings removal is not supported under compatible 
api") - task.skipTest("indices.stats/20_translog/Translog retention without soft_deletes", "translog settings removal is not supported under compatible api") - task.skipTest("indices.stats/20_translog/Translog stats on closed indices without soft-deletes", "translog settings removal is not supported under compatible api") - task.skipTest("indices.create/10_basic/Create index without soft deletes", "Make soft-deletes mandatory in 8.0 #51122 - settings changes are note supported in Rest Api compatibility") - task.skipTest("field_caps/30_filter/Field caps with index filter", "behaviour change after #63692 4digits dates are parsed as epoch and in quotes as year") - task.skipTest("indices.forcemerge/10_basic/Check deprecation warning when incompatible only_expunge_deletes and max_num_segments values are both set", "#44761 bug fix") - task.skipTest("search/340_type_query/type query", "#47207 type query throws exception in compatible mode") - task.skipTest("search/310_match_bool_prefix/multi_match multiple fields with cutoff_frequency throws exception", "#42654 cutoff_frequency, common terms are not supported. 
Throwing an exception") - task.skipTest("search_shards/10_basic/Search shards aliases with and without filters", "Filter representation no longer outputs default boosts") - task.skipTest("migration/10_get_feature_upgrade_status/Get feature upgrade status", "Awaits backport") - task.skipTest("search/330_fetch_fields/Test disable source", "Error no longer thrown") - task.skipTest("search/370_profile/fetch fields", "profile output has changed") - task.skipTest("search/370_profile/fetch source", "profile output has changed") - task.skipTest("search/370_profile/fetch nested source", "profile output has changed") - task.skipTest("search/240_date_nanos/doc value fields are working as expected across date and date_nanos fields", "Fetching docvalues field multiple times is no longer allowed") - task.skipTest("search/110_field_collapsing/field collapsing and rescore", "#107779 Field collapsing is compatible with rescore in 8.15") - - task.replaceValueInMatch("_type", "_doc") - task.addAllowedWarningRegex("\\[types removal\\].*") - task.replaceValueInMatch("nodes.\$node_id.roles.8", "ml", "node_info role test") - task.replaceValueInMatch("nodes.\$node_id.roles.9", "remote_cluster_client", "node_info role test") - task.removeMatch("nodes.\$node_id.roles.10", "node_info role test") - task.replaceIsTrue("test_index.mappings.type_1", "test_index.mappings._doc") - //override for indices.get and indices.create - //task.replaceIsFalse("test_index.mappings.type_1", "test_index.mappings._doc") - //overrides for indices.create/20_mix_typeless_typeful - task.replaceIsFalse("test-1.mappings._doc","false", "Create a typed index while there is a typeless template") - task.replaceIsFalse("test-1.mappings._doc","false", "Create a typeless index while there is a typed template") - - task.replaceIsTrue("test-1.mappings.my_type", "test-1.mappings._doc") - task.replaceIsTrue("test-1.mappings.my_type.properties.foo", "test-1.mappings._doc.properties.foo") - 
task.replaceIsTrue("test-1.mappings.my_type.properties.bar", "test-1.mappings._doc.properties.bar") - - // overrides for indices.get_field_mapping - task.replaceKeyInLength("test_index.mappings.test_type.text.mapping.text.type", - "test_index.mappings._doc.text.mapping.text.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.text.mapping.text.analyzer", - "test_index.mappings._doc.text.mapping.text.analyzer" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.t1.full_name", - "test_index.mappings._doc.t1.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.t2.full_name", - "test_index.mappings._doc.t2.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.obj\\.t1.full_name", - "test_index.mappings._doc.obj\\.t1.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.obj\\.i_t1.full_name", - "test_index.mappings._doc.obj\\.i_t1.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.obj\\.i_t3.full_name", - "test_index.mappings._doc.obj\\.i_t3.full_name" - ) - task.replaceKeyInLength("test_index.mappings.test_type", - "test_index.mappings._doc" - ) - task.replaceKeyInMatch("test_index_2.mappings.test_type_2.t1.full_name", - "test_index.mappings._doc.t1.full_name" - ) - task.replaceKeyInMatch("test_index_2.mappings.test_type_2.t2.full_name", - "test_index.mappings._doc.t2.full_name" - ) - task.replaceKeyInLength("test_index_2.mappings.test_type_2", - "test_index.mappings._doc" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.text.mapping.text.type", - "test_index.mappings._doc.text.mapping.text.type" - ) - // overrides for indices.put_mapping/11_basic_with_types - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text1.type", - "test_index.mappings._doc.properties.text1.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text1.analyzer", - "test_index.mappings._doc.properties.text1.analyzer" - ) - 
task.replaceKeyInMatch("test_index.mappings.test_type.properties.text2.type", - "test_index.mappings._doc.properties.text2.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text2.analyzer", - "test_index.mappings._doc.properties.text2.analyzer" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.subfield.properties.text3.type", - "test_index.mappings._doc.properties.subfield.properties.text3.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text1.fields.text_raw.type", - "test_index.mappings._doc.properties.text1.fields.text_raw.type" - ) - // overrides for indices.put_mapping/all_path_options_with_types - task.replaceKeyInMatch("test_index1.mappings.test_type.properties.text.type", - "test_index1.mappings._doc.properties.text.type" - ) - task.replaceKeyInMatch("test_index1.mappings.test_type.properties.text.analyzer", - "test_index1.mappings._doc.properties.text.analyzer" - ) - task.replaceKeyInMatch("test_index2.mappings.test_type.properties.text.type", - "test_index2.mappings._doc.properties.text.type" - ) - task.replaceKeyInMatch("test_index2.mappings.test_type.properties.text.analyzer", - "test_index2.mappings._doc.properties.text.analyzer" - ) - task.replaceKeyInMatch("foo.mappings.test_type.properties.text.type", - "foo.mappings._doc.properties.text.type" - ) - task.replaceKeyInMatch("foo.mappings.test_type.properties.text.analyzer", - "foo.mappings._doc.properties.text.analyzer" - ) - // overrides for indices.get_mapping - task.replaceIsTrue("test_1.mappings.doc", "test_1.mappings._doc") - task.replaceIsTrue("test_2.mappings.doc", "test_2.mappings._doc") - // overrides for mget - task.replaceValueInMatch("docs.0._type", "_doc" , "Basic multi-get") // index found, but no doc - task.replaceValueInMatch("docs.0._type", "_doc", "Default index/type") - task.replaceValueInMatch("docs.0._type", "_doc", "Non-existent index") - task.replaceValueInMatch("docs.0._type", "_doc", "Missing 
metadata") - task.replaceValueInMatch("docs.0._type", "_doc", "Multi Get with alias that resolves to multiple indices") - task.replaceValueInMatch("docs.1._type", "_doc", "Multi Get with alias that resolves to multiple indices") - task.replaceValueInMatch("docs.2._type", "_doc", "Multi Get with alias that resolves to multiple indices") - task.replaceValueInMatch("docs.0._type", "_doc", "IDs") - task.replaceValueInMatch("docs.1._type", "_doc", "IDs") - task.replaceValueInMatch("docs.2._type", "_doc", "Routing") - - //overrides for indices.stats - //TODO fix to remove the below match - task.replaceKeyInMatch("_all.primaries.indexing.types.baz.index_total", - "_all.primaries.indexing.types._doc.index_total" - ) - task.replaceKeyInMatch("_all.primaries.indexing.types.bar.index_total", - "_all.primaries.indexing.types._doc.index_total" - ) - task.replaceValueInMatch("_all.primaries.indexing.types._doc.index_total", 2) - // points get touched by sorting in ES 8 - task.replaceValueInMatch("testindex.shards.0.stats.fields.price.points", 1) - - //override for "indices.open/10_basic/?wait_for_active_shards default is deprecated" and "indices.open/10_basic/?wait_for_active_shards=index-setting" - task.addAllowedWarningRegexForTest("\\?wait_for_active_shards=index-setting is now the default behaviour.*", "?wait_for_active_shards=index-setting") - task.removeWarningForTest("the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; " + - "specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - , "?wait_for_active_shards default is deprecated") - - // override for exception message change in #55291 tests cluster.voting_config_exclusions/10_basic/ - // 'Throw exception when adding voting config exclusion and specifying both node_ids and node_names', - // 'Throw exception when adding voting config exclusion without specifying nodes', 
- task.replaceValueTextByKeyValue("catch", - '/Please set node identifiers correctly. One and only one of \\[node_name\\], \\[node_names\\] and \\[node_ids\\] has to be set/', - '/You must set \\[node_names\\] or \\[node_ids\\] but not both/') - - // sync_id is no longer available in SegmentInfos.userData // "indices.flush/10_basic/Index synced flush rest test" - task.replaceIsTrue("indices.testing.shards.0.0.commit.user_data.sync_id", "indices.testing.shards.0.0.commit.user_data") - - // we can now search using doc values only - task.replaceValueInMatch("fields.object\\.nested1.long.searchable", true) - - //client.type no longer exists #101214 - task.replaceKeyInMatch("nodes.\$node_id.settings.client.type", "nodes.\$node_id.settings.node.attr.testattr") - task.replaceValueInMatch("nodes.\$node_id.settings.node.attr.testattr", "test") - task.replaceKeyInMatch("nodes.\$node_id.settings.client\\.type", "nodes.\$node_id.settings.node\\.attr\\.testattr") - task.replaceValueInMatch("nodes.\$node_id.settings.node\\.attr\\.testattr", "test") -} - tasks.register('enforceYamlTestConvention').configure { def tree = fileTree('src/main/resources/rest-api-spec/test') doLast { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.repository_verify_integrity.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.repository_verify_integrity.json new file mode 100644 index 0000000000000..bab8101b74552 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.repository_verify_integrity.json @@ -0,0 +1,65 @@ +{ + "snapshot.repository_verify_integrity":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "description":"Verifies the integrity of the contents of a snapshot repository" + }, + "stability":"experimental", + "visibility":"private", + "headers": { + "accept": [ + "application/json" + ] + }, + "url":{ + "paths":[ + { + 
"path":"/_snapshot/{repository}/_verify_integrity", + "methods":[ + "POST" + ], + "parts":{ + "repository":{ + "type":"string", + "description":"A repository name" + } + } + } + ] + }, + "params":{ + "meta_thread_pool_concurrency":{ + "type":"number", + "description":"Number of threads to use for reading metadata" + }, + "blob_thread_pool_concurrency":{ + "type":"number", + "description":"Number of threads to use for reading blob contents" + }, + "snapshot_verification_concurrency":{ + "type":"number", + "description":"Number of snapshots to verify concurrently" + }, + "index_verification_concurrency":{ + "type":"number", + "description":"Number of indices to verify concurrently" + }, + "index_snapshot_verification_concurrency":{ + "type":"number", + "description":"Number of snapshots to verify concurrently within each index" + }, + "max_failed_shard_snapshots":{ + "type":"number", + "description":"Maximum permitted number of failed shard snapshots" + }, + "verify_blob_contents":{ + "type":"boolean", + "description":"Whether to verify the contents of individual blobs" + }, + "max_bytes_per_sec":{ + "type":"string", + "description":"Rate limit for individual blob verification" + } + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml index cf43797a451e7..06139542c5e55 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml @@ -258,8 +258,8 @@ --- "Dense vector stats": - requires: - cluster_features: [ "gte_v8.15.0" ] - reason: "dense vector stats reports from primary indices in 8.15" + cluster_features: [ "gte_v8.16.0" ] + reason: "dense vector stats reports from primary indices in 8.15 and fixed in 8.16" - do: indices.create: index: test1 @@ -329,9 +329,17 @@ - do: indices.refresh: { } + 
- do: + index: + index: test2 + id: "3" + refresh: true + body: + not_vector_field: "not vector" + - do: { cluster.stats: { } } - - match: { indices.docs.count: 4 } + - match: { indices.docs.count: 5 } - match: { indices.docs.deleted: 0 } - match: { indices.dense_vector.value_count: 8 } diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json deleted file mode 100644 index 2cdc2f3bc9aea..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "cluster.post_voting_config_exclusions_with_node_name_part":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html", - "description":"Updates the cluster voting config exclusions by node_name (not node ids or node names)." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_cluster/voting_config_exclusions/{node_name}", - "methods":[ - "POST" - ], - "parts":{ - "node_name":{ - "type":"string", - "description":"A comma-separated list of node descriptors of the nodes to exclude from the voting configuration." 
- } - }, - "deprecated":{ - "version":"7.8.0", - "description":"node_name is deprecated, use node_names or node_ids instead" - } - } - ] - } - } -} diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/indices.put_template_with_param.json b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/indices.put_template_with_param.json deleted file mode 100644 index 7ee6cbd39ebf3..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/indices.put_template_with_param.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "indices.put_template_with_param":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", - "description":"Creates or updates an index template." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_template/{name}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "name":{ - "type":"string", - "description":"The name of the template" - } - } - } - ] - }, - "params":{ - "template":{ - "type":"string", - "description":"The indices that this template should apply to, replaced by index_patterns within the template definition." 
- }, - "order":{ - "type":"number", - "description":"The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers)" - }, - "create":{ - "type":"boolean", - "description":"Whether the index template should only be added if new or can also replace an existing one", - "default":false - }, - "master_timeout":{ - "type":"time", - "description":"Specify timeout for connection to master" - } - }, - "body":{ - "description":"The template definition", - "required":true - } - } -} diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic_compat.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic_compat.yml deleted file mode 100644 index 8806918703abe..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic_compat.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings_regex" - ---- -"Throw exception when adding voting config exclusion by specifying a 'node_name'": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - cluster.post_voting_config_exclusions_with_node_name_part: - node_name: someNodeName - warnings_regex: - - ".* /_cluster/voting_config_exclusions/\\{node_name\\} has been removed. 
.*" - catch: /\[node_name\] has been removed, you must set \[node_names\] or \[node_ids\]/ diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.deprecated.upgrade/10_basic_upgrade.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.deprecated.upgrade/10_basic_upgrade.yml deleted file mode 100644 index b368975fa5e5b..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.deprecated.upgrade/10_basic_upgrade.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "allowed_warnings_regex" - ---- -Basic test for upgrade indices: - - requires: - cluster_features: ["gte_v7.11.0"] - reason: "_upgrade api is deprecated since 7.11.0" - test_runner_features: - - "warnings" - - do: - indices.create: - index: "test_index" - body: - settings: - index: - number_of_replicas: 0 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - do: - catch: "bad_request" - indices.upgrade: - index: "test_index" - warnings: - - "The _upgrade API is no longer useful and will be removed. Instead, see _reindex\ - \ API." 
- headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - match: - status: 400 - - match: - error.reason: "/Upgrade.action.(GET|POST).(_upgrade|/test_index/_upgrade).was.removed,.use._reindex.API.instead/" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.put_template/10_basic_compat.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.put_template/10_basic_compat.yml deleted file mode 100644 index 043e525a8e9b5..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.put_template/10_basic_compat.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings" - ---- -"Put template": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "Deprecated field [template] used, replaced by [index_patterns]" - indices.put_template: - name: test - body: - template: test-* - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - field: - type: keyword - - - do: - indices.get_template: - name: test - flat_settings: true - - - match: {test.index_patterns: ["test-*"]} - - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}} - - match: {test.mappings: {properties: {field: {type: keyword}}}} - ---- -"Put template (with template parameter)": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "Deprecated parameter [template] used, replaced by [index_patterns]" - indices.put_template_with_param: - name: test - template: "test-*" - body: - settings: - number_of_shards: 1 - 
number_of_replicas: 0 - mappings: - properties: - field: - type: keyword - - - do: - indices.get_template: - name: test - flat_settings: true - - - match: {test.index_patterns: ["test-*"]} - - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}} - - match: {test.mappings: {properties: {field: {type: keyword}}}} diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/nodes.hot_threads/10_basic_compat.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/nodes.hot_threads/10_basic_compat.yml deleted file mode 100644 index c64e80d0f6a03..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/nodes.hot_threads/10_basic_compat.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "allowed_warnings_regex" - ---- -"Get hot threads": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - nodes.hot_threads: {} - allowed_warnings_regex: - - ".*hot_?threads.* is a deprecated endpoint.*" - - match: - $body: /:::/ diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml deleted file mode 100644 index c3b3c4320be97..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - ---- -moving_avg agg throws exception: - - do: - catch: "/Moving Average aggregation usage is not supported. 
Use the \\[moving_fn\\] aggregation instead./" - search: - rest_total_hits_as_int: true - body: - aggs: - the_histo: - date_histogram: - field: "date" - calendar_interval: "1d" - aggs: - the_avg: - avg: - field: "value_field" - the_movavg: - moving_avg: - buckets_path: "the_avg" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml deleted file mode 100644 index 323a5b9abbf1e..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -setup: -- skip: - features: - - "headers" - - "allowed_warnings_regex" -- do: - indices.create: - index: "my-index" - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - offer: - type: "nested" -- do: - index: - index: "my-index" - id: "1" - refresh: true - body: - offer: - price: 10 - color: blue - - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - -- do: - indices.create: - index: "my-locations" - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - pin: - properties: - location: - type: geo_point - offer: - type: "nested" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - -- do: - index: - index: "my-locations" - id: "1" - refresh: true - body: - offer: - price: 10 - color: blue - pin: - location: - lat: 40.12 - lon: -71.34 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: 
"application/vnd.elasticsearch+json;compatible-with=7" - - - - - ---- -"Sort with nested_path throws exception": -- do: - catch: /\[nested_path\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-index" - body: - sort: - - offer.price: - mode: avg - order: asc - nested_path: offer - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - ---- -"Sort with nested_filter throws exception": - - do: - catch: /\[nested_filter\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-index" - body: - sort: - - offer.price: - mode: avg - order: asc - nested_filter: - term: - offer.color: blue - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - - ---- -"Geo search with nested_filter throws exception": - - do: - catch: /\[nested_filter\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-locations" - body: - query: - match_all: {} - sort: - _geo_distance: - pin.location: - - -70 - - 40 - nested_filter: - term: - offer.color: blue - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - ---- -"Geo search with nested_path throws exception": - - do: - catch: /\[nested_path\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-locations" - body: - query: - match_all: {} - sort: - _geo_distance: - pin.location: - - -70 - - 40 - nested_path: "offer" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" diff --git 
a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_cutoff_frequency.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_cutoff_frequency.yml deleted file mode 100644 index b7df872ff0a86..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_cutoff_frequency.yml +++ /dev/null @@ -1,103 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "allowed_warnings_regex" - - do: - indices.create: - index: "test" - body: - mappings: - properties: - my_field1: - type: "text" - my_field2: - type: "text" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - do: - index: - index: "test" - id: "1" - body: - my_field1: "brown fox jump" - my_field2: "xylophone" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - do: - indices.refresh: {} - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - ---- -multi_match multiple fields with cutoff_frequency throws exception: -- do: - catch: "/cutoff_freqency is not supported. 
The \\[multi_match\\] query can skip block of documents efficiently if the total number of hits is not tracked/" - search: - rest_total_hits_as_int: true - index: "test" - body: - query: - multi_match: - query: "brown" - type: "bool_prefix" - fields: - - "my_field1" - - "my_field2" - cutoff_frequency: 0.001 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - ---- -match with cutoff_frequency throws exception: - - do: - catch: "/cutoff_freqency is not supported. The \\[match\\] query can skip block of documents efficiently if the total number of hits is not tracked/" - search: - rest_total_hits_as_int: true - index: "test" - body: - query: - match: - my_field1: - query: "brown" - type: "bool_prefix" - cutoff_frequency: 0.001 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - ---- -common querythrows exception: - - do: - catch: "/Common Terms Query usage is not supported. 
Use \\[match\\] query which can efficiently skip blocks of documents if the total number of hits is not tracked./" - search: - rest_total_hits_as_int: true - index: "test" - body: - query: - common: - my_field1: - query: "brown" - type: "bool_prefix" - cutoff_frequency: 0.001 - low_freq_operator: or - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_geo_bounding_box.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_geo_bounding_box.yml deleted file mode 100644 index 3f3eac1e59e1a..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_geo_bounding_box.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings" - - do: - indices.create: - index: locations - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - - properties: - location: - type: geo_point - - do: - bulk: - index: locations - refresh: true - body: | - {"index":{}} - {"location" : {"lat": 13.5, "lon" : 34.89}} - {"index":{}} - {"location" : {"lat": -7.9, "lon" : 120.78}} - {"index":{}} - {"location" : {"lat": 45.78, "lon" : -173.45}} - {"index":{}} - {"location" : {"lat": 32.45, "lon" : 45.6}} - {"index":{}} - {"location" : {"lat": -63.24, "lon" : 31.0}} - {"index":{}} - {"location" : {"lat": 0.0, "lon" : 0.0}} - - ---- -"geo bounding box query not compatible": - - do: - catch: /failed to parse \[geo_bounding_box\] query. 
unexpected field \[type\]/ - search: - index: locations - body: - query: - geo_bounding_box: - type : indexed - location: - top_left: - lat: 10 - lon: -10 - bottom_right: - lat: -10 - lon: 10 - ---- -"geo bounding box query compatible": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "Deprecated parameter [type] used, it should no longer be specified." - search: - index: locations - body: - query: - geo_bounding_box: - type : indexed - location: - top_left: - lat: 10 - lon: -10 - bottom_right: - lat: -10 - lon: 10 - - match: {hits.total.value: 1} - diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_type_query.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_type_query.yml deleted file mode 100644 index fdaebbb2b81e7..0000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_type_query.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -setup: - - skip: - features: - - "headers" - - "allowed_warnings_regex" ---- -type query throws exception when used: - - do: - index: - index: "test1" - id: "1" - type: "cat" - refresh: true - body: - foo: "bar" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - - do: - catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ - search: - rest_total_hits_as_int: true - index: "test1" - body: - query: - type: - value: "cat" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - - do: - catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ - 
search: - rest_total_hits_as_int: true - index: "test1" - body: - query: - type: - value: "_doc" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java index 937addb473f8b..c80f13861e83f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.indices.mapping; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperParsingException; @@ -33,6 +34,8 @@ protected boolean forbidPrivateIndexSettings() { * contains unknown parameters. We were able to create those templates in 7.x still, so we need * to be able to index new documents into them. Indexing should issue a deprecation warning though. 
*/ + @UpdateForV9 + @AwaitsFix(bugUrl = "this is testing 7.x specific compatibility which may be n/a now after 9.0 bump") public void testBWCMalformedDynamicTemplate() { // this parameter is not supported by "keyword" field type String mapping = """ diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index f9dc42cb7abe8..1e3a7d5a6b817 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -107,6 +108,8 @@ public void testGetShardSnapshotOnEmptyRepositoriesListThrowsAnError() { expectThrows(IllegalArgumentException.class, () -> getLatestSnapshotForShardFuture(Collections.emptyList(), "idx", 0, false)); } + @UpdateForV9 + // below we were selecting an index version between current and 7.5.0, this has been updated to 8.0.0 now but that might need to change public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exception { final String repoName = "repo-name"; final Path repoPath = randomRepoPath(); @@ -114,7 +117,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final boolean useBwCFormat = randomBoolean(); if (useBwCFormat) { - final IndexVersion version = randomVersionBetween(random(), IndexVersions.V_7_5_0, IndexVersion.current()); + final IndexVersion version = randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); 
initWithSnapshotVersion(repoName, repoPath, version); } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 2bd1d79afd52d..ad50856c556f7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -206,8 +206,9 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_ADD_INDEX_MODE_CONCRETE_INDICES = def(8_736_00_0); public static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0); public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0); - public static final TransportVersion CCS_TELEMETRY_STATS = def(8_739_00_0); + public static final TransportVersion GLOBAL_RETENTION_TELEMETRY = def(8_740_00_0); + /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ @@ -265,6 +266,8 @@ static TransportVersion def(int id) { * Reference to the earliest compatible transport version to this version of the codebase. * This should be the transport version used by the highest minor version of the previous major. 
*/ + @UpdateForV9 + // This needs to be bumped to the 8.last public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0; /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0164c6b80fa6b..1f3ab73889278 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -13,8 +13,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Assertions; -import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -185,7 +185,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_1 = new Version(8_15_01_99); public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_16_0 = new Version(8_16_00_99); - public static final Version CURRENT = V_8_16_0; + public static final Version V_9_0_0 = new Version(9_00_00_99); + public static final Version CURRENT = V_9_0_0; private static final NavigableMap VERSION_IDS; private static final Map VERSION_STRINGS; @@ -221,14 +222,7 @@ public class Version implements VersionId, ToXContentFragment { } } } - assert RestApiVersion.current().major == CURRENT.major && RestApiVersion.previous().major == CURRENT.major - 1 - : "RestApiVersion must be upgraded " - + "to reflect major from Version.CURRENT [" - + CURRENT.major - + "]" - + " but is still set to [" - + RestApiVersion.current().major - + "]"; + assertRestApiVersion(); builder.put(V_EMPTY_ID, V_EMPTY); builderByString.put(V_EMPTY.toString(), V_EMPTY); @@ -236,6 +230,19 @@ public class Version implements VersionId, ToXContentFragment { VERSION_STRINGS 
= Map.copyOf(builderByString); } + @UpdateForV9 + // Re-enable this assertion once the rest api version is bumped + private static void assertRestApiVersion() { + // assert RestApiVersion.current().major == CURRENT.major && RestApiVersion.previous().major == CURRENT.major - 1 + // : "RestApiVersion must be upgraded " + // + "to reflect major from Version.CURRENT [" + // + CURRENT.major + // + "]" + // + " but is still set to [" + // + RestApiVersion.current().major + // + "]"; + } + public static Version readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); } diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 7d2b1be79731e..8ccc190a0444b 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -45,20 +45,21 @@ public static class Request extends MasterNodeRequest implements Indice private DownsampleConfig downsampleConfig; public Request( + TimeValue masterNodeTimeout, final String sourceIndex, final String targetIndex, final TimeValue waitTimeout, final DownsampleConfig downsampleConfig ) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + super(masterNodeTimeout); this.sourceIndex = sourceIndex; this.targetIndex = targetIndex; this.waitTimeout = waitTimeout == null ? 
DEFAULT_WAIT_TIMEOUT : waitTimeout; this.downsampleConfig = downsampleConfig; } - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public Request(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java new file mode 100644 index 0000000000000..d647956e752a3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Holds the data stream failure store metadata that enable or disable the failure store of a data stream. 
Currently, it + * supports the following configurations: + * - enabled + */ +public record DataStreamFailureStore(boolean enabled) implements SimpleDiffable, ToXContentObject { + + public static final ParseField ENABLED_FIELD = new ParseField("enabled"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "failure_store", + false, + (args, unused) -> new DataStreamFailureStore(args[0] == null || (Boolean) args[0]) + ); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD); + } + + public DataStreamFailureStore() { + this(true); + } + + public DataStreamFailureStore(StreamInput in) throws IOException { + this(in.readBoolean()); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DataStreamFailureStore::new, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(enabled); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED_FIELD.getPreferredName(), enabled); + builder.endObject(); + return builder; + } + + public static DataStreamFailureStore fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java new file mode 100644 index 0000000000000..9c7d2a986fa48 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Holds data stream dedicated configuration options such as failure store, (in the future lifecycle). 
Currently, it + * supports the following configurations: + * - failure store + */ +public record DataStreamOptions(@Nullable DataStreamFailureStore failureStore) + implements + SimpleDiffable, + ToXContentObject { + + public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "options", + false, + (args, unused) -> new DataStreamOptions((DataStreamFailureStore) args[0]) + ); + + static { + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataStreamFailureStore.fromXContent(p), + FAILURE_STORE_FIELD, + ObjectParser.ValueType.OBJECT_OR_NULL + ); + } + + public DataStreamOptions() { + this(null); + } + + public static DataStreamOptions read(StreamInput in) throws IOException { + return new DataStreamOptions(in.readOptionalWriteable(DataStreamFailureStore::new)); + } + + @Nullable + public DataStreamFailureStore getFailureStore() { + return failureStore; + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DataStreamOptions::read, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(failureStore); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (failureStore != null) { + builder.field(FAILURE_STORE_FIELD.getPreferredName(), failureStore); + } + builder.endObject(); + return builder; + } + + public static DataStreamOptions fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java 
index 459c6c6ec733e..b945fe7e510f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -567,17 +567,14 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions // Exclude this one as it's a net-new system index, and we explicitly don't want those. return false; } - if (DataStream.isFailureStoreFeatureFlagEnabled()) { - IndexAbstraction indexAbstraction = context.getState().metadata().getIndicesLookup().get(index.getName()); - if (context.options.allowFailureIndices() == false) { - DataStream parentDataStream = indexAbstraction.getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - if (parentDataStream.isFailureStoreIndex(index.getName())) { - if (options.ignoreUnavailable()) { - return false; - } else { - throw new FailureIndexNotSupportedException(index); - } + if (DataStream.isFailureStoreFeatureFlagEnabled() && context.options.allowFailureIndices() == false) { + DataStream parentDataStream = context.getState().metadata().getIndicesLookup().get(index.getName()).getParentDataStream(); + if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { + if (parentDataStream.isFailureStoreIndex(index.getName())) { + if (options.ignoreUnavailable()) { + return false; + } else { + throw new FailureIndexNotSupportedException(index); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 991aa66d07ff6..6ef1d6dd86b34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -413,6 +413,11 @@ 
private float getShardWriteLoad(String index) { private float maxShardSizeBytes(String index) { final var indexMetadata = metadata.index(index); + if (indexMetadata.ignoreDiskWatermarks()) { + // disk watermarks are ignored for partial searchable snapshots + // and is equivalent to indexMetadata.isPartialSearchableSnapshot() + return 0; + } var maxShardSizeBytes = indexMetadata.getForecastedShardSizeInBytes().orElse(0L); for (int shard = 0; shard < indexMetadata.getNumberOfShards(); shard++) { final var shardId = new ShardId(indexMetadata.getIndex(), shard); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index acdc3e32ea31a..95552fa508f72 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -884,24 +884,26 @@ protected StoredFieldsReader doGetSequentialStoredFieldsReader(StoredFieldsReade } } - DirectoryReaderWithAllLiveDocs(DirectoryReader in) throws IOException { - super(in, new SubReaderWrapper() { - @Override - public LeafReader wrap(LeafReader leaf) { - final SegmentReader segmentReader = segmentReader(leaf); - final Bits hardLiveDocs = segmentReader.getHardLiveDocs(); - if (hardLiveDocs == null) { - return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc()); - } - // Once soft-deletes is enabled, we no longer hard-update or hard-delete documents directly. - // Two scenarios that we have hard-deletes: (1) from old segments where soft-deletes was disabled, - // (2) when IndexWriter hits non-aborted exceptions. These two cases, IW flushes SegmentInfos - // before exposing the hard-deletes, thus we can use the hard-delete count of SegmentInfos. 
- final int numDocs = segmentReader.maxDoc() - segmentReader.getSegmentInfo().getDelCount(); - assert numDocs == popCount(hardLiveDocs) : numDocs + " != " + popCount(hardLiveDocs); - return new LeafReaderWithLiveDocs(segmentReader, hardLiveDocs, numDocs); + private static final SubReaderWrapper ALL_LIVE_DOCS_SUB_READER_WRAPPER = new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader leaf) { + final SegmentReader segmentReader = segmentReader(leaf); + final Bits hardLiveDocs = segmentReader.getHardLiveDocs(); + if (hardLiveDocs == null) { + return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc()); } - }); + // Once soft-deletes is enabled, we no longer hard-update or hard-delete documents directly. + // Two scenarios that we have hard-deletes: (1) from old segments where soft-deletes was disabled, + // (2) when IndexWriter hits non-aborted exceptions. These two cases, IW flushes SegmentInfos + // before exposing the hard-deletes, thus we can use the hard-delete count of SegmentInfos. + final int numDocs = segmentReader.maxDoc() - segmentReader.getSegmentInfo().getDelCount(); + assert numDocs == popCount(hardLiveDocs) : numDocs + " != " + popCount(hardLiveDocs); + return new LeafReaderWithLiveDocs(segmentReader, hardLiveDocs, numDocs); + } + }; + + DirectoryReaderWithAllLiveDocs(DirectoryReader in) throws IOException { + super(in, ALL_LIVE_DOCS_SUB_READER_WRAPPER); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 608d88fdef664..7bc8273eef525 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -166,7 +166,7 @@ private static IndexVersion def(int id, Version luceneVersion) { * In branches 8.7-8.11 see server/src/main/java/org/elasticsearch/index/IndexVersion.java for the equivalent definitions. 
*/ - public static final IndexVersion MINIMUM_COMPATIBLE = V_7_0_0; + public static final IndexVersion MINIMUM_COMPATIBLE = V_8_0_0; static final NavigableMap VERSION_IDS = getAllVersionIds(IndexVersions.class); static final IndexVersion LATEST_DEFINED; @@ -217,8 +217,10 @@ static NavigableMap getAllVersionIds(Class cls) { return Collections.unmodifiableNavigableMap(builder); } + @UpdateForV9 + // We can simplify this once we've removed all references to index versions earlier than MINIMUM_COMPATIBLE static Collection getAllVersions() { - return VERSION_IDS.values(); + return VERSION_IDS.values().stream().filter(v -> v.onOrAfter(MINIMUM_COMPATIBLE)).toList(); } static final IntFunction VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(IndexVersions.class, LATEST_DEFINED.id()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 05c661701e1a7..8dec3fadd8a04 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -284,7 +284,7 @@ private long getDenseVectorValueCount(final LeafReader atomicReader, List 0) { + if (info != null && info.getVectorDimension() > 0) { switch (info.getVectorEncoding()) { case FLOAT32 -> { FloatVectorValues values = atomicReader.getFloatVectorValues(info.name); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 062e7551a53c9..638af1a105328 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -42,7 +42,6 @@ import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.lookup.FieldValues; import org.elasticsearch.search.lookup.SearchLookup; -import 
org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.net.InetAddress; @@ -545,8 +544,9 @@ protected String contentType() { @Override protected void parseCreateField(DocumentParserContext context) throws IOException { InetAddress address; + String value = context.parser().textOrNull(); try { - address = value(context.parser(), nullValue); + address = value == null ? nullValue : InetAddresses.forString(value); } catch (IllegalArgumentException e) { if (ignoreMalformed) { context.addIgnoredField(fieldType().name()); @@ -564,14 +564,6 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } } - private static InetAddress value(XContentParser parser, InetAddress nullValue) throws IOException { - String value = parser.textOrNull(); - if (value == null) { - return nullValue; - } - return InetAddresses.forString(value); - } - private void indexValue(DocumentParserContext context, InetAddress address) { if (dimension) { context.getDimensions().addIp(fieldType().name(), address).validate(context.indexSettings()); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index b7d1beb4d1e06..8f1ae42a7475c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1700,13 +1700,22 @@ public void setGlobalCheckpointIfUnpromotable(long globalCheckpoint) { private static final class NonClosingReaderWrapper extends FilterDirectoryReader { + private static final LeafReader[] EMPTY_LEAF_READERS = new LeafReader[0]; + + private static final FilterDirectoryReader.SubReaderWrapper SUB_READER_WRAPPER = new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return reader; + } + + @Override + protected LeafReader[] wrap(List readers) { + return readers.toArray(EMPTY_LEAF_READERS); + } + }; + private 
NonClosingReaderWrapper(DirectoryReader in) throws IOException { - super(in, new SubReaderWrapper() { - @Override - public LeafReader wrap(LeafReader reader) { - return reader; - } - }); + super(in, SUB_READER_WRAPPER); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java index 6ab1bb6413fa6..586b1bae4cc9a 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java @@ -21,10 +21,14 @@ * close is called before. */ public abstract class SlicedInputStream extends InputStream { - private int slice = 0; + private int nextSlice = 0; private InputStream currentStream; + private int currentSliceOffset = 0; private final int numSlices; + private boolean closed = false; private boolean initialized = false; + private int markedSlice = -1; + private int markedSliceOffset = -1; /** * Creates a new SlicedInputStream @@ -36,18 +40,23 @@ protected SlicedInputStream(final int numSlices) { private InputStream nextStream() throws IOException { assert initialized == false || currentStream != null; + assert closed == false : "attempted to get next stream when closed"; initialized = true; IOUtils.close(currentStream); - if (slice < numSlices) { - currentStream = openSlice(slice++); + if (nextSlice < numSlices) { + currentStream = openSlice(nextSlice++); } else { currentStream = null; } + currentSliceOffset = 0; return currentStream; } /** * Called for each logical slice given a zero based slice ordinal. + * + * Note that if {@link InputStream#markSupported()} is true (can be overridden to return false), the function may be called again to + * open a previous slice (which must have the same size as before). The returned InputStreams do not need to support mark/reset. 
*/ protected abstract InputStream openSlice(int slice) throws IOException; @@ -69,6 +78,7 @@ public final int read() throws IOException { nextStream(); return read(); } + currentSliceOffset++; return read; } @@ -83,14 +93,22 @@ public final int read(byte[] buffer, int offset, int length) throws IOException nextStream(); return read(buffer, offset, length); } + currentSliceOffset += read; return read; } @Override - public final void close() throws IOException { - IOUtils.close(currentStream); + public void close() throws IOException { + closed = true; initialized = true; + currentSliceOffset = 0; + final InputStream stream = currentStream; currentStream = null; + IOUtils.close(stream); + } + + public boolean isClosed() { + return closed; } @Override @@ -99,4 +117,47 @@ public final int available() throws IOException { return stream == null ? 0 : stream.available(); } + @Override + public boolean markSupported() { + return true; + } + + @Override + public void mark(int readLimit) { + // We ignore readLimit since openSlice() can re-open previous InputStreams, and we can skip as many bytes as we'd like. + // According to JDK documentation, marking a closed InputStream should have no effect. + if (markSupported() && isClosed() == false && numSlices > 0) { + if (initialized) { + markedSlice = nextSlice - 1; + markedSliceOffset = currentSliceOffset; + } else { + markedSlice = 0; + markedSliceOffset = 0; + } + } + } + + @Override + public void reset() throws IOException { + if (markSupported()) { + if (isClosed()) { + throw new IOException("reset called on a closed stream"); + } else if (numSlices > 0) { + if (markedSlice < 0 || markedSliceOffset < 0) { + throw new IOException("Mark has not been set"); + } + + // We do not call the SlicedInputStream's skipNBytes but call skipNBytes directly on the returned stream, to ensure that + // the skip is performed on the marked slice and no other slices are involved. This may help uncover any bugs. 
+ nextSlice = markedSlice; + final InputStream stream = nextStream(); + if (stream != null) { + stream.skipNBytes(markedSliceOffset); + } + currentSliceOffset = markedSliceOffset; + } + } else { + throw new IOException("mark/reset not supported"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 3261ac83a7e67..bafbd57a607a5 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -562,11 +562,10 @@ public static SystemIndexAccessLevel getSystemIndexAccessLevel(ThreadContext thr // This method intentionally cannot return BACKWARDS_COMPATIBLE_ONLY - that access level should only be used manually // in known special cases. final String headerValue = threadContext.getHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY); - final String productHeaderValue = threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY); final boolean allowed = Booleans.parseBoolean(headerValue, true); if (allowed) { - if (productHeaderValue != null) { + if (threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY) != null) { return SystemIndexAccessLevel.RESTRICTED; } else { return SystemIndexAccessLevel.ALL; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/SnapshotFilesProvider.java b/server/src/main/java/org/elasticsearch/indices/recovery/SnapshotFilesProvider.java index 1424ef160657b..0a1bf765bc12d 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/SnapshotFilesProvider.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/SnapshotFilesProvider.java @@ -52,6 +52,11 @@ public InputStream getInputStreamForSnapshotFile( protected InputStream openSlice(int slice) throws IOException { return container.readBlob(OperationPurpose.SNAPSHOT_DATA, fileInfo.partName(slice)); } + + @Override + public boolean 
markSupported() { + return false; + } }; } return blobStoreRepository.maybeRateLimitRestores(inputStream, rateLimiterListener::accept); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index eb9ef08b329ab..ea53882a22a01 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -855,7 +855,8 @@ private void construct( featureService, systemIndices, dataStreamGlobalRetentionSettings, - documentParsingProvider + documentParsingProvider, + taskManager ); Collection pluginComponents = pluginsService.flatMap(plugin -> { diff --git a/server/src/main/java/org/elasticsearch/node/PluginServiceInstances.java b/server/src/main/java/org/elasticsearch/node/PluginServiceInstances.java index 7c8775502fd64..74ae5936c9602 100644 --- a/server/src/main/java/org/elasticsearch/node/PluginServiceInstances.java +++ b/server/src/main/java/org/elasticsearch/node/PluginServiceInstances.java @@ -24,6 +24,7 @@ import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -48,5 +49,6 @@ public record PluginServiceInstances( FeatureService featureService, SystemIndices systemIndices, DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings, - DocumentParsingProvider documentParsingProvider + DocumentParsingProvider documentParsingProvider, + TaskManager taskManager ) implements Plugin.PluginServices {} diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index a8bfda54b0646..4441ecadc3e8e 100644 --- 
a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -31,6 +31,7 @@ import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -165,6 +166,12 @@ public interface PluginServices { * A provider of utilities to observe and report parsing of documents */ DocumentParsingProvider documentParsingProvider(); + + /** + * The task manager for the node. This should only be used by plugins + * to track task removal by registering a RemovedTaskListener. + */ + TaskManager taskManager(); } /** diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index c6494eca9823b..72376d5b20fdb 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -281,6 +281,13 @@ public Collection getSnapshotIds() { return snapshotIds.values(); } + /** + * @return the number of index snapshots (i.e. the sum of the index count of each snapshot) + */ + public long getIndexSnapshotCount() { + return indexSnapshots.values().stream().mapToLong(List::size).sum(); + } + /** * @return whether some of the {@link SnapshotDetails} of the given snapshot are missing, due to BwC, so that they must be loaded from * the {@link SnapshotInfo} blob instead. 
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 104cb95018312..43af0a970857b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -3606,6 +3606,11 @@ protected InputStream openSlice(int slice) throws IOException { ensureNotClosing(store); return container.readBlob(OperationPurpose.SNAPSHOT_DATA, fileInfo.partName(slice)); } + + @Override + public boolean markSupported() { + return false; + } })) { final byte[] buffer = new byte[Math.toIntExact(Math.min(bufferSize, fileInfo.length()))]; int length; diff --git a/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java b/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java index ca574a5c7eba3..a71394aaaf39f 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java +++ b/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.xcontent.MediaType; import org.elasticsearch.xcontent.ParsedMediaType; @@ -25,6 +26,7 @@ class RestCompatibleVersionHelper { /** * @return The requested API version, or {@link Optional#empty()} if there was no explicit version in the request. 
*/ + @UpdateForV9 static Optional getCompatibleVersion( @Nullable ParsedMediaType acceptHeader, @Nullable ParsedMediaType contentTypeHeader, @@ -49,7 +51,8 @@ static Optional getCompatibleVersion( if (hasContent) { // content-type version must be current or prior - if (contentTypeVersion > RestApiVersion.current().major || contentTypeVersion < RestApiVersion.minimumSupported().major) { + // This can be uncommented once all references to RestApiVersion.V_7 are removed + /*if (contentTypeVersion > RestApiVersion.current().major || contentTypeVersion < RestApiVersion.minimumSupported().major) { throw new ElasticsearchStatusException( "Content-Type version must be either version {} or {}, but found {}. Content-Type={}", RestStatus.BAD_REQUEST, @@ -58,7 +61,7 @@ static Optional getCompatibleVersion( contentTypeVersion, contentTypeHeader ); - } + }*/ // if both accept and content-type are sent, the version must match if (contentTypeVersion != acceptVersion) { throw new ElasticsearchStatusException( diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollector.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollector.java index b63b961ed7b6a..00b523d4b2a33 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollector.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollector.java @@ -189,14 +189,10 @@ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOExcept return new FilterLeafCollector(topDocsLeafCollector) { @Override public void setScorer(Scorable scorer) throws IOException { - super.setScorer(new FilterScorable(scorer) { - @Override - public void setMinCompetitiveScore(float minScore) { - // Ignore calls to setMinCompetitiveScore. 
The top docs collector may try to skip low - // scoring hits, but the overall score_mode won't allow it because an aggs collector - // was originally provided which never supports TOP_SCORES is not supported for aggs - } - }); + // Ignore calls to setMinCompetitiveScore. The top docs collector may try to skip low + // scoring hits, but the overall score_mode won't allow it because an aggs collector + // was originally provided which never supports TOP_SCORES is not supported for aggs + super.setScorer(wrapToIgnoreMinCompetitiveScore(scorer)); } @Override @@ -208,6 +204,13 @@ public DocIdSetIterator competitiveIterator() throws IOException { return new CompositeLeafCollector(postFilterBits, topDocsLeafCollector, aggsLeafCollector); } + private static FilterScorable wrapToIgnoreMinCompetitiveScore(Scorable scorer) { + return new FilterScorable(scorer) { + @Override + public void setMinCompetitiveScore(float minScore) {} + }; + } + private class TopDocsLeafCollector implements LeafCollector { private final Bits postFilterBits; private final LeafCollector topDocsLeafCollector; @@ -262,14 +265,10 @@ public void setScorer(Scorable scorer) throws IOException { if (cacheScores && topDocsLeafCollector != null && aggsLeafCollector != null) { scorer = ScoreCachingWrappingScorer.wrap(scorer); } - scorer = new FilterScorable(scorer) { - @Override - public void setMinCompetitiveScore(float minScore) { - // Ignore calls to setMinCompetitiveScore so that if the top docs collector - // wants to skip low-scoring hits, the aggs collector still sees all hits. - // this is important also for terminate_after in case used when total hits tracking is early terminated. - } - }; + // Ignore calls to setMinCompetitiveScore so that if the top docs collector + // wants to skip low-scoring hits, the aggs collector still sees all hits. + // this is important also for terminate_after in case used when total hits tracking is early terminated. 
+ scorer = wrapToIgnoreMinCompetitiveScore(scorer); if (topDocsLeafCollector != null) { topDocsLeafCollector.setScorer(scorer); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index b6f1ac46b4250..4ff99d17195a0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -54,7 +54,7 @@ protected VersionStats mutateInstance(VersionStats instance) { return new VersionStats(instance.versionStats().stream().map(svs -> { return switch (randomIntBetween(1, 4)) { case 1 -> new VersionStats.SingleVersionStats( - IndexVersions.V_7_3_0, + IndexVersions.V_8_3_0, svs.indexCount, svs.primaryShardCount, svs.totalPrimaryByteCount @@ -89,12 +89,12 @@ public void testCreation() { metadata = new Metadata.Builder().put(indexMeta("foo", IndexVersion.current(), 4), true) .put(indexMeta("bar", IndexVersion.current(), 3), true) - .put(indexMeta("baz", IndexVersions.V_7_0_0, 2), true) + .put(indexMeta("baz", IndexVersions.V_8_0_0, 2), true) .build(); stats = VersionStats.of(metadata, Collections.emptyList()); assertThat(stats.versionStats().size(), equalTo(2)); VersionStats.SingleVersionStats s1 = new VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 0); - VersionStats.SingleVersionStats s2 = new VersionStats.SingleVersionStats(IndexVersions.V_7_0_0, 1, 2, 0); + VersionStats.SingleVersionStats s2 = new VersionStats.SingleVersionStats(IndexVersions.V_8_0_0, 1, 2, 0); assertThat(stats.versionStats(), containsInAnyOrder(s1, s2)); ShardId shardId = new ShardId("bar", "uuid", 0); @@ -135,7 +135,7 @@ public void testCreation() { stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse)); assertThat(stats.versionStats().size(), equalTo(2)); s1 = new 
VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 100); - s2 = new VersionStats.SingleVersionStats(IndexVersions.V_7_0_0, 1, 2, 0); + s2 = new VersionStats.SingleVersionStats(IndexVersions.V_8_0_0, 1, 2, 0); assertThat(stats.versionStats(), containsInAnyOrder(s1, s2)); } @@ -144,7 +144,7 @@ private static IndexMetadata indexMeta(String name, IndexVersion version, int pr } public static VersionStats randomInstance() { - List versions = List.of(IndexVersion.current(), IndexVersions.V_7_0_0, IndexVersions.V_7_1_0, IndexVersions.V_7_2_0); + List versions = List.of(IndexVersion.current(), IndexVersions.V_8_0_0, IndexVersions.V_8_1_0, IndexVersions.V_8_2_0); List stats = new ArrayList<>(); for (IndexVersion v : versions) { VersionStats.SingleVersionStats s = new VersionStats.SingleVersionStats( diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java index c39cc6ebfd665..4edc6ce589486 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java @@ -10,10 +10,8 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.IndexMode; @@ -23,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Base64; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -37,8 +34,6 @@ import static 
org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomIndexResponsesWithMappingHash; import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomIndexResponsesWithoutMappingHash; import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomMappingHashToIndices; -import static org.hamcrest.Matchers.anEmptyMap; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.nullValue; @@ -191,49 +186,6 @@ public void testSerializeNodeResponseBetweenOldNodes() throws IOException { } } - public void testReadNodeResponseFromPre82() throws Exception { - final Version minCompactVersion = Version.CURRENT.minimumCompatibilityVersion(); - assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(Version.V_8_2_0)); - String base64 = "AwhpbmRleF8wMQIKYmx1ZV9maWVsZApibHVlX2ZpZWxkBGxvbmcAAQEAAAAJcmVkX2ZpZWxkCXJlZF9maWVsZAR0ZXh0AAEAAAAAAQhpbm" - + "RleF8wMgAACGluZGV4XzAzAgdfc2VxX25vB19zZXFfbm8EbG9uZwEBAQAAAAx5ZWxsb3dfZmllbGQMeWVsbG93X2ZpZWxkB2tleXdvcmQAAQEAAAABAAEI" - + "aW5kZXhfMTAGdXVpZF9hAQ=="; - StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(base64)); - in.setTransportVersion(TransportVersions.V_8_1_0); - FieldCapabilitiesNodeResponse nodeResp = new FieldCapabilitiesNodeResponse(in); - assertThat(nodeResp.getUnmatchedShardIds(), equalTo(Set.of(new ShardId("index_10", "uuid_a", 1)))); - assertThat(nodeResp.getFailures(), anEmptyMap()); - assertThat( - nodeResp.getIndexResponses(), - contains( - new FieldCapabilitiesIndexResponse( - "index_01", - null, - Map.of( - "red_field", - new IndexFieldCapabilities("red_field", "text", false, true, false, false, null, Map.of()), - "blue_field", - new IndexFieldCapabilities("blue_field", "long", false, true, true, false, null, Map.of()) - ), - true, - IndexMode.STANDARD - ), - new 
FieldCapabilitiesIndexResponse("index_02", null, Map.of(), false, IndexMode.STANDARD), - new FieldCapabilitiesIndexResponse( - "index_03", - null, - Map.of( - "yellow_field", - new IndexFieldCapabilities("yellow_field", "keyword", false, true, true, false, null, Map.of()), - "_seq_no", - new IndexFieldCapabilities("_seq_no", "long", true, true, true, false, null, Map.of()) - ), - true, - IndexMode.STANDARD - ) - ) - ); - } - private static FieldCapabilitiesNodeResponse randomNodeResponse(List indexResponses) { int numUnmatched = randomIntBetween(0, 3); final Set unmatchedShardIds = new HashSet<>(); diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index cc4d4de1e0f39..bdeef56eca89b 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -11,10 +11,8 @@ import org.elasticsearch.ElasticsearchExceptionTests; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ChunkedToXContent; @@ -29,7 +27,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -40,8 +37,6 @@ import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomIndexResponsesWithMappingHash; import static 
org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomIndexResponsesWithoutMappingHash; import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomMappingHashToIndices; -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.nullValue; @@ -246,46 +241,4 @@ public void testSerializeCCSResponseBetweenOldClusters() throws IOException { } } } - - public void testReadCCSResponseFromPre82() throws Exception { - final Version minCompactVersion = Version.CURRENT.minimumCompatibilityVersion(); - assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(Version.V_8_2_0)); - String base64 = "AAADCGluZGV4XzAxAgpibHVlX2ZpZWxkCmJsdWVfZmllbGQEbG9uZwABAQAAAAlyZWRfZmllbGQJcmVkX2ZpZWxkBHRleHQAAQAAAAABC" - + "GluZGV4XzAyAAAIaW5kZXhfMDMCDHllbGxvd19maWVsZAx5ZWxsb3dfZmllbGQHa2V5d29yZAABAQAAAAdfc2VxX25vB19zZXFfbm8EbG9uZwEBAQAAAA" - + "EAAAAAAAAAAAA="; - StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(base64)); - in.setTransportVersion(TransportVersions.V_8_1_0); - FieldCapabilitiesResponse nodeResp = new FieldCapabilitiesResponse(in); - assertThat(nodeResp.getFailures(), empty()); - assertThat( - nodeResp.getIndexResponses(), - contains( - new FieldCapabilitiesIndexResponse( - "index_01", - null, - Map.of( - "red_field", - new IndexFieldCapabilities("red_field", "text", false, true, false, false, null, Map.of()), - "blue_field", - new IndexFieldCapabilities("blue_field", "long", false, true, true, false, null, Map.of()) - ), - true, - IndexMode.STANDARD - ), - new FieldCapabilitiesIndexResponse("index_02", null, Map.of(), false, IndexMode.STANDARD), - new FieldCapabilitiesIndexResponse( - "index_03", - null, - Map.of( - "yellow_field", - new IndexFieldCapabilities("yellow_field", "keyword", false, true, true, false, null, Map.of()), 
- "_seq_no", - new IndexFieldCapabilities("_seq_no", "long", true, true, true, false, null, Map.of()) - ), - true, - IndexMode.STANDARD - ) - ) - ); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreTests.java new file mode 100644 index 0000000000000..f5334f903af6b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class DataStreamFailureStoreTests extends AbstractXContentSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return DataStreamFailureStore::new; + } + + @Override + protected DataStreamFailureStore createTestInstance() { + return randomFailureStore(); + } + + @Override + protected DataStreamFailureStore mutateInstance(DataStreamFailureStore instance) throws IOException { + return new DataStreamFailureStore(instance.enabled() == false); + } + + @Override + protected DataStreamFailureStore doParseInstance(XContentParser parser) throws IOException { + return DataStreamFailureStore.fromXContent(parser); + } + + static DataStreamFailureStore randomFailureStore() { + return new DataStreamFailureStore(randomBoolean()); + } +} diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamOptionsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamOptionsTests.java new file mode 100644 index 0000000000000..8a7cf2329b863 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamOptionsTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class DataStreamOptionsTests extends AbstractXContentSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return DataStreamOptions::read; + } + + @Override + protected DataStreamOptions createTestInstance() { + return new DataStreamOptions(randomBoolean() ? null : DataStreamFailureStoreTests.randomFailureStore()); + } + + @Override + protected DataStreamOptions mutateInstance(DataStreamOptions instance) throws IOException { + var failureStore = instance.getFailureStore(); + if (failureStore == null) { + failureStore = DataStreamFailureStoreTests.randomFailureStore(); + } else { + failureStore = randomBoolean() ? 
null : new DataStreamFailureStore(failureStore.enabled() == false); + } + return new DataStreamOptions(failureStore); + } + + @Override + protected DataStreamOptions doParseInstance(XContentParser parser) throws IOException { + return DataStreamOptions.fromXContent(parser); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 01394a7abbcd5..2190f8f20e762 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; @@ -1313,6 +1314,8 @@ public void testRejectTranslogRetentionSettings() { ); } + @UpdateForV9 + @AwaitsFix(bugUrl = "looks like a test that's not applicable to 9.0 after version bump") public void testDeprecateTranslogRetentionSettings() { request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder settings = Settings.builder(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 955d7d2de6882..0faff63a72682 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Predicates; import org.elasticsearch.core.SuppressForbidden; +import 
org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; @@ -866,20 +867,23 @@ public void testFindMappingsWithFilters() throws IOException { public void testOldestIndexComputation() { Metadata metadata = buildIndicesWithVersions( - IndexVersions.V_7_0_0, + IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current(), IndexVersion.fromId(IndexVersion.current().id() + 1) ).build(); - assertEquals(IndexVersions.V_7_0_0, metadata.oldestIndexVersion()); + assertEquals(IndexVersions.MINIMUM_COMPATIBLE, metadata.oldestIndexVersion()); Metadata.Builder b = Metadata.builder(); assertEquals(IndexVersion.current(), b.build().oldestIndexVersion()); Throwable ex = expectThrows( IllegalArgumentException.class, - () -> buildIndicesWithVersions(IndexVersions.V_7_0_0, IndexVersions.ZERO, IndexVersion.fromId(IndexVersion.current().id() + 1)) - .build() + () -> buildIndicesWithVersions( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.ZERO, + IndexVersion.fromId(IndexVersion.current().id() + 1) + ).build() ); assertEquals("[index.version.created] is not present in the index settings for index with UUID [null]", ex.getMessage()); @@ -1962,6 +1966,8 @@ public void testHiddenAliasValidation() { } } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test needs to be updated or removed after the version 9.0 bump") public void testSystemAliasValidationMixedVersionSystemAndRegularFails() { final IndexVersion random7xVersion = IndexVersionUtils.randomVersionBetween( random(), @@ -2012,6 +2018,8 @@ public void testSystemAliasValidationNewSystemAndRegularFails() { ); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test needs to be updated or removed after the version 9.0 bump") public void testSystemAliasOldSystemAndNewRegular() { final IndexVersion random7xVersion = IndexVersionUtils.randomVersionBetween( random(), @@ -2025,6 +2033,8 @@ public void testSystemAliasOldSystemAndNewRegular() 
{ metadataWithIndices(oldVersionSystem, regularIndex); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test needs to be updated or removed after the version 9.0 bump") public void testSystemIndexValidationAllRegular() { final IndexVersion random7xVersion = IndexVersionUtils.randomVersionBetween( random(), @@ -2039,6 +2049,8 @@ public void testSystemIndexValidationAllRegular() { metadataWithIndices(currentVersionSystem, currentVersionSystem2, oldVersionSystem); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test needs to be updated or removed after the version 9.0 bump") public void testSystemAliasValidationAllSystemSomeOld() { final IndexVersion random7xVersion = IndexVersionUtils.randomVersionBetween( random(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java index e03183e43239f..6177b571fd598 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java @@ -116,7 +116,7 @@ public void testBalanceByForecastWriteLoad() { ); var clusterState = applyStartedShardsUntilNoChange( - stateWithStartedIndices( + createStateWithIndices( anIndex("heavy-index").indexWriteLoadForecast(8.0), anIndex("light-index-1").indexWriteLoadForecast(1.0), anIndex("light-index-2").indexWriteLoadForecast(2.0), @@ -151,7 +151,7 @@ public void testBalanceByForecastDiskUsage() { var allocationService = createAllocationService(WITH_DISK_BALANCING); var clusterState = applyStartedShardsUntilNoChange( - stateWithStartedIndices( + createStateWithIndices( anIndex("heavy-index").shardSizeInBytesForecast(ByteSizeValue.ofGb(8).getBytes()), anIndex("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), 
anIndex("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), @@ -196,7 +196,7 @@ public void testBalanceByActualDiskUsage() { ); var clusterState = applyStartedShardsUntilNoChange( - stateWithStartedIndices( + createStateWithIndices( anIndex("heavy-index"), anIndex("light-index-1"), anIndex("light-index-2"), @@ -232,7 +232,7 @@ public void testBalanceByActualAndForecastDiskUsage() { ); var clusterState = applyStartedShardsUntilNoChange( - stateWithStartedIndices( + createStateWithIndices( anIndex("heavy-index"),// size is set in cluster info anIndex("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), anIndex("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), @@ -268,7 +268,7 @@ public void testDoNotBalancePartialIndicesByDiskUsage() { .put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true); var clusterState = applyStartedShardsUntilNoChange( - stateWithStartedIndices( + createStateWithIndices( anIndex("frozen-index-1", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), anIndex("frozen-index-2", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), anIndex("frozen-index-3", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), @@ -536,13 +536,32 @@ private static IndexMetadata.Builder anIndex(String name, Settings.Builder setti return IndexMetadata.builder(name).settings(settings); } - private static ClusterState stateWithStartedIndices(IndexMetadata.Builder... indices) { + private static ClusterState createStateWithIndices(IndexMetadata.Builder... 
indexMetadataBuilders) { var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); - for (var index : indices) { - var build = index.build(); - metadataBuilder.put(build, false); - routingTableBuilder.addAsNew(build); + if (randomBoolean()) { + // allocate all shards from scratch + for (var index : indexMetadataBuilders) { + var indexMetadata = index.build(); + metadataBuilder.put(indexMetadata, false); + routingTableBuilder.addAsNew(indexMetadata); + } + } else { + // ensure unbalanced cluster cloud be properly balanced + // simulates a case when we add a second node and ensure shards could be evenly spread across all available nodes + for (var index : indexMetadataBuilders) { + var inSyncId = UUIDs.randomBase64UUID(); + var indexMetadata = index.putInSyncAllocationIds(0, Set.of(inSyncId)).build(); + metadataBuilder.put(indexMetadata, false); + routingTableBuilder.add( + IndexRoutingTable.builder(indexMetadata.getIndex()) + .addShard( + shardRoutingBuilder(new ShardId(indexMetadata.getIndex(), 0), "node-1", true, ShardRoutingState.STARTED) + .withAllocationId(AllocationId.newInitializing(inSyncId)) + .build() + ) + ); + } } return ClusterState.builder(ClusterName.DEFAULT) diff --git a/server/src/test/java/org/elasticsearch/common/ReferenceDocsTests.java b/server/src/test/java/org/elasticsearch/common/ReferenceDocsTests.java index 0fabf78017304..49208f2341701 100644 --- a/server/src/test/java/org/elasticsearch/common/ReferenceDocsTests.java +++ b/server/src/test/java/org/elasticsearch/common/ReferenceDocsTests.java @@ -66,7 +66,7 @@ public void testResourceValidation() throws Exception { builder.startObject("UNEXPECTED").endObject().endObject(); try (var stream = BytesReference.bytes(builder).streamInput()) { - expectThrows(IllegalStateException.class, () -> ReferenceDocs.readLinksBySymbol(stream)); + expectThrows(IllegalArgumentException.class, () -> 
ReferenceDocs.readLinksBySymbol(stream)); } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java index aa62553447db0..47da6f8cdc0f8 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; @@ -114,6 +115,8 @@ public static NewSubObject parse(XContentParser parser) { } } + @UpdateForV9 + @AwaitsFix(bugUrl = "this can be re-enabled once our rest api version is bumped to V_9") public void testNotCompatibleRequest() throws IOException { NamedXContentRegistry registry = new NamedXContentRegistry( List.of( diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index adab51a37d2bf..b7dea50ee4386 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.index.Index; @@ -537,6 +538,8 @@ public void testBlocksDowngradeToVersionWithMultipleNodesInDataPath() throws IOE } } + @UpdateForV9 + @AwaitsFix(bugUrl = "test won't 
work until we remove and bump minimum index versions") public void testIndexCompatibilityChecks() throws IOException { final Settings settings = buildEnvSettings(Settings.EMPTY); @@ -634,6 +637,8 @@ public void testSymlinkDataDirectory() throws Exception { env.close(); } + @UpdateForV9 + @AwaitsFix(bugUrl = "test won't work until we remove and bump minimum index versions") public void testGetBestDowngradeVersion() { assertThat(NodeEnvironment.getBestDowngradeVersion("7.17.0"), Matchers.equalTo("7.17.0")); assertThat(NodeEnvironment.getBestDowngradeVersion("7.17.5"), Matchers.equalTo("7.17.5")); diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index f60812977d578..499861ceb346d 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -78,6 +79,8 @@ public void testEqualsHashcodeSerialization() { ); } + @UpdateForV9 + @AwaitsFix(bugUrl = "as mentioned in the comment below, the behavior here is changing for 9.0 so this test needs updating") public void testReadsFormatWithoutVersion() throws IOException { // the behaviour tested here is only appropriate if the current version is compatible with versions 7 and earlier assertTrue(IndexVersions.MINIMUM_COMPATIBLE.onOrBefore(IndexVersions.V_7_0_0)); @@ -151,6 +154,8 @@ public void testDoesNotUpgradeAncientVersion() { ); } + @UpdateForV9 + @AwaitsFix(bugUrl = "Needs to be updated for 9.0 version bump") public void testUpgradeMarksPreviousVersion() { final String nodeId = randomAlphaOfLength(10); final Version version = 
VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 379adc9ce517a..b2db13c1481ec 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -159,20 +159,6 @@ public void testSortingAgainstAliases() { assertEquals("Cannot use alias [field] as an index sort field", e.getMessage()); } - public void testSortingAgainstAliasesPre713() { - IndexSettings indexSettings = indexSettings( - Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_7_12_0).put("index.sort.field", "field").build() - ); - MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); - Sort sort = buildIndexSort(indexSettings, Map.of("field", aliased)); - assertThat(sort.getSort(), arrayWithSize(1)); - assertThat(sort.getSort()[0].getField(), equalTo("aliased")); - assertWarnings( - "Index sort for index [test] defined on field [field] which resolves to field [aliased]. 
" - + "You will not be able to define an index sort over aliased fields in new indexes" - ); - } - public void testTimeSeriesMode() { IndexSettings indexSettings = indexSettings( Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java index dcf73ec617e60..d37d03407f691 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.hamcrest.Matchers; @@ -32,27 +33,27 @@ public class IndexVersionTests extends ESTestCase { public void testVersionComparison() { - IndexVersion V_7_2_0 = IndexVersions.V_7_2_0; - IndexVersion V_8_0_0 = IndexVersions.V_8_0_0; - assertThat(V_7_2_0.before(V_8_0_0), is(true)); - assertThat(V_7_2_0.before(V_7_2_0), is(false)); - assertThat(V_8_0_0.before(V_7_2_0), is(false)); - - assertThat(V_7_2_0.onOrBefore(V_8_0_0), is(true)); - assertThat(V_7_2_0.onOrBefore(V_7_2_0), is(true)); - assertThat(V_8_0_0.onOrBefore(V_7_2_0), is(false)); - - assertThat(V_7_2_0.after(V_8_0_0), is(false)); - assertThat(V_7_2_0.after(V_7_2_0), is(false)); - assertThat(V_8_0_0.after(V_7_2_0), is(true)); - - assertThat(V_7_2_0.onOrAfter(V_8_0_0), is(false)); - assertThat(V_7_2_0.onOrAfter(V_7_2_0), is(true)); - assertThat(V_8_0_0.onOrAfter(V_7_2_0), is(true)); - - assertThat(V_7_2_0, is(lessThan(V_8_0_0))); - assertThat(V_7_2_0.compareTo(V_7_2_0), is(0)); - assertThat(V_8_0_0, is(greaterThan(V_7_2_0))); + IndexVersion V_8_2_0 = IndexVersions.V_8_2_0; + IndexVersion current = IndexVersion.current(); + assertThat(V_8_2_0.before(current), is(true)); + assertThat(V_8_2_0.before(V_8_2_0), is(false)); + 
assertThat(current.before(V_8_2_0), is(false)); + + assertThat(V_8_2_0.onOrBefore(current), is(true)); + assertThat(V_8_2_0.onOrBefore(V_8_2_0), is(true)); + assertThat(current.onOrBefore(V_8_2_0), is(false)); + + assertThat(V_8_2_0.after(current), is(false)); + assertThat(V_8_2_0.after(V_8_2_0), is(false)); + assertThat(current.after(V_8_2_0), is(true)); + + assertThat(V_8_2_0.onOrAfter(current), is(false)); + assertThat(V_8_2_0.onOrAfter(V_8_2_0), is(true)); + assertThat(current.onOrAfter(V_8_2_0), is(true)); + + assertThat(V_8_2_0, is(lessThan(current))); + assertThat(V_8_2_0.compareTo(V_8_2_0), is(0)); + assertThat(current, is(greaterThan(V_8_2_0))); } public static class CorrectFakeVersion { @@ -149,6 +150,8 @@ public void testMax() { } } + @UpdateForV9 + @AwaitsFix(bugUrl = "believe this fails because index version has not yet been bumped to 9.0") public void testMinimumCompatibleVersion() { assertThat(IndexVersion.getMinimumCompatibleIndexVersion(7170099), equalTo(IndexVersion.fromId(6000099))); assertThat(IndexVersion.getMinimumCompatibleIndexVersion(8000099), equalTo(IndexVersion.fromId(7000099))); @@ -189,6 +192,8 @@ public void testParseLenient() { } } + @UpdateForV9 + @AwaitsFix(bugUrl = "can be unmuted once lucene is bumped to version 10") public void testLuceneVersionOnUnknownVersions() { // between two known versions, should use the lucene version of the previous version IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 47d75c6d0bd13..818dafcfeeda1 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -175,7 +175,6 @@ import java.util.function.ToLongBiFunction; import java.util.stream.Collectors; import java.util.stream.LongStream; 
-import java.util.stream.StreamSupport; import static java.util.Collections.shuffle; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; @@ -6616,117 +6615,6 @@ public void testRebuildLocalCheckpointTrackerAndVersionMap() throws Exception { } } - public void testRecoverFromHardDeletesIndex() throws Exception { - IndexWriterFactory hardDeletesWriter = (directory, iwc) -> new IndexWriter(directory, iwc) { - boolean isTombstone(Iterable doc) { - return StreamSupport.stream(doc.spliterator(), false).anyMatch(d -> d.name().equals(Lucene.SOFT_DELETES_FIELD)); - } - - @Override - public long addDocument(Iterable doc) throws IOException { - if (isTombstone(doc)) { - return 0; - } - return super.addDocument(doc); - } - - @Override - public long addDocuments(Iterable> docs) throws IOException { - if (StreamSupport.stream(docs.spliterator(), false).anyMatch(this::isTombstone)) { - return 0; - } - return super.addDocuments(docs); - } - - @Override - public long softUpdateDocument(Term term, Iterable doc, Field... softDeletes) throws IOException { - if (isTombstone(doc)) { - return super.deleteDocuments(term); - } else { - return super.updateDocument(term, doc); - } - } - - @Override - public long softUpdateDocuments(Term term, Iterable> docs, Field... 
softDeletes) - throws IOException { - if (StreamSupport.stream(docs.spliterator(), false).anyMatch(this::isTombstone)) { - return super.deleteDocuments(term); - } else { - return super.updateDocuments(term, docs); - } - } - }; - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - Path translogPath = createTempDir(); - List operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean()); - final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()) - .settings( - Settings.builder() - .put(defaultSettings.getSettings()) - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0) - ) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) - ) - .build(); - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); - try (Store store = createStore()) { - EngineConfig config = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); - final List docs; - try ( - InternalEngine hardDeletesEngine = createEngine( - indexSettings, - store, - translogPath, - newMergePolicy(), - hardDeletesWriter, - null, - globalCheckpoint::get - ) - ) { - for (Engine.Operation op : operations) { - applyOperation(hardDeletesEngine, op); - if (randomBoolean()) { - hardDeletesEngine.syncTranslog(); - globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), hardDeletesEngine.getPersistedLocalCheckpoint())); - } - if (randomInt(100) < 10) { - hardDeletesEngine.refresh("test"); - } - if (randomInt(100) < 5) { - hardDeletesEngine.flush(true, true); - } - } - docs = getDocIds(hardDeletesEngine, true); - } - // We need to remove min_retained_seq_no commit tag as the actual hard-deletes engine does not have it. 
- store.trimUnsafeCommits(translogPath); - Map userData = new HashMap<>(store.readLastCommittedSegmentsInfo().userData); - userData.remove(Engine.MIN_RETAINED_SEQNO); - IndexWriterConfig indexWriterConfig = new IndexWriterConfig(null).setOpenMode(IndexWriterConfig.OpenMode.APPEND) - .setIndexCreatedVersionMajor(IndexVersion.current().luceneVersion().major) - .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) - .setCommitOnClose(false) - .setMergePolicy(NoMergePolicy.INSTANCE); - try (IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig)) { - writer.setLiveCommitData(userData.entrySet()); - writer.commit(); - } - try (InternalEngine softDeletesEngine = new InternalEngine(config)) { // do not recover from translog - assertThat(softDeletesEngine.getLastCommittedSegmentInfos().userData, equalTo(userData)); - assertThat(softDeletesEngine.getVersionMap().keySet(), empty()); - recoverFromTranslog(softDeletesEngine, translogHandler, Long.MAX_VALUE); - if (randomBoolean()) { - engine.forceMerge(randomBoolean(), 1, false, UUIDs.randomBase64UUID()); - } - assertThat(getDocIds(softDeletesEngine, true), equalTo(docs)); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(softDeletesEngine); - } - } - } - void assertLuceneOperations(InternalEngine engine, long expectedAppends, long expectedUpdates, long expectedDeletes) { String message = "Lucene operations mismatched;" + " appends [actual:" @@ -7500,14 +7388,14 @@ public void testTrimUnsafeCommitHasESVersionInUserData() throws IOException { .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); try (IndexWriter indexWriter = new IndexWriter(store.directory(), indexWriterConfig)) { Map commitUserDataWithOlderVersion = new HashMap<>(committedSegmentsInfo.userData); - commitUserDataWithOlderVersion.put(ES_VERSION, IndexVersions.V_7_0_0.toString()); + commitUserDataWithOlderVersion.put(ES_VERSION, IndexVersions.MINIMUM_COMPATIBLE.toString()); indexWriter.setLiveCommitData(commitUserDataWithOlderVersion.entrySet()); 
indexWriter.commit(); } Map userDataBeforeTrimUnsafeCommits = store.readLastCommittedSegmentsInfo().getUserData(); assertThat(userDataBeforeTrimUnsafeCommits, hasKey(ES_VERSION)); - assertThat(userDataBeforeTrimUnsafeCommits.get(ES_VERSION), is(equalTo(IndexVersions.V_7_0_0.toString()))); + assertThat(userDataBeforeTrimUnsafeCommits.get(ES_VERSION), is(equalTo(IndexVersions.MINIMUM_COMPATIBLE.toString()))); store.trimUnsafeCommits(config.getTranslogConfig().getTranslogPath()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index aa28e8a3bd5b4..ff4f7e3c2e52e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -19,9 +19,7 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.script.DateFieldScript; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -45,7 +43,6 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.mockito.Mockito.mock; public class DateFieldMapperTests extends MapperTestCase { @@ -247,10 +244,6 @@ public void testBadNullValue() throws IOException { + "failed to parse date field [foo] with format [strict_date_optional_time||epoch_millis]" ) ); - - createDocumentMapper(IndexVersions.V_7_9_0, fieldMapping(b -> b.field("type", "date").field("null_value", "foo"))); - - assertWarnings("Error parsing [foo] as date in [null_value] on field [field]); [null_value] will be ignored"); } public void testNullConfigValuesFail() { @@ 
-753,51 +746,4 @@ public void testLegacyField() throws Exception { assertNotEquals(DEFAULT_DATE_TIME_FORMATTER, ((DateFieldType) service.fieldType("mydate")).dateTimeFormatter); } - public void testLegacyDateFormatName() { - DateFieldMapper.Builder builder = new DateFieldMapper.Builder( - "format", - DateFieldMapper.Resolution.MILLISECONDS, - null, - mock(ScriptService.class), - true, - // BWC compatible index, e.g 7.x - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0) - ) - ); - - // Check that we allow the use of camel case date formats on 7.x indices - @SuppressWarnings("unchecked") - FieldMapper.Parameter formatParam = (FieldMapper.Parameter) builder.getParameters()[3]; - formatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime"); - builder.buildFormatter(); // shouldn't throw exception - - formatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime||strictDateOptionalTimeNanos"); - builder.buildFormatter(); // shouldn't throw exception - - DateFieldMapper.Builder newFieldBuilder = new DateFieldMapper.Builder( - "format", - DateFieldMapper.Resolution.MILLISECONDS, - null, - mock(ScriptService.class), - true, - IndexVersion.current() - ); - - @SuppressWarnings("unchecked") - final FieldMapper.Parameter newFormatParam = (FieldMapper.Parameter) newFieldBuilder.getParameters()[3]; - - // Check that we don't allow the use of camel case date formats on 8.x indices - assertEquals( - "Error parsing [format] on field [format]: Invalid format: [strictDateOptionalTime]: Unknown pattern letter: t", - expectThrows(IllegalArgumentException.class, () -> { - newFormatParam.parse("date_time_format", mock(MappingParserContext.class), "strictDateOptionalTime"); - assertEquals("strictDateOptionalTime", newFormatParam.getValue()); - newFieldBuilder.buildFormatter(); - }).getMessage() - ); - - } } diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index a5a5d9726f233..4d6e730afded0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -20,10 +20,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.XContentTestUtils; -import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -221,61 +219,25 @@ public void testSimpleWithXContentTraverse() throws Exception { } public void testDynamicMapperWithBadMapping() throws IOException { - { - // in 7.x versions this will issue a deprecation warning - IndexVersion version = IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0) - ); - DocumentMapper mapper = createDocumentMapper(version, topMapping(b -> { - b.startArray("dynamic_templates"); - { - b.startObject(); - { - b.startObject("test"); - { - b.field("match_mapping_type", "string"); - b.startObject("mapping").field("badparam", false).endObject(); - } - b.endObject(); - } - b.endObject(); - } - b.endArray(); - })); - assertWarnings( - "Parameter [badparam] is used in a dynamic template mapping and has no effect on type [null]. " - + "Usage will result in an error in future major versions and should be removed." 
- ); - mapper.parse(source(b -> b.field("field", "foo"))); - assertWarnings( - "Parameter [badparam] is used in a dynamic template mapping and has no effect on type [null]. " - + "Usage will result in an error in future major versions and should be removed." - ); - } - - { - // in 8.x it will error out - Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(topMapping(b -> { - b.startArray("dynamic_templates"); + // in 8.x it will error out + Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); { - b.startObject(); + b.startObject("test"); { - b.startObject("test"); - { - b.field("match_mapping_type", "string"); - b.startObject("mapping").field("badparam", false).endObject(); - } - b.endObject(); + b.field("match_mapping_type", "string"); + b.startObject("mapping").field("badparam", false).endObject(); } b.endObject(); } - b.endArray(); - }))); - assertThat(e.getMessage(), containsString("dynamic template [test] has invalid content")); - assertThat(e.getCause().getMessage(), containsString("badparam")); - } + b.endObject(); + } + b.endArray(); + }))); + assertThat(e.getMessage(), containsString("dynamic template [test] has invalid content")); + assertThat(e.getCause().getMessage(), containsString("badparam")); } public void testDynamicRuntimeWithBadMapping() { @@ -677,35 +639,6 @@ public void testIllegalDynamicTemplateNoMappingTypeRuntime() throws Exception { assertEquals("unknown parameter [foo] on runtime field [__dynamic__my_template] of type [date]", e.getRootCause().getMessage()); } - public void testIllegalDynamicTemplate7DotXIndex() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); - { - mapping.startObject(MapperService.SINGLE_MAPPING_NAME); - mapping.startArray("dynamic_templates"); - { - mapping.startObject(); - mapping.startObject("my_template"); - 
mapping.field("match_mapping_type", "string"); - mapping.startObject("mapping"); - mapping.field("type", "string"); - mapping.endObject(); - mapping.endObject(); - mapping.endObject(); - } - mapping.endArray(); - mapping.endObject(); - } - mapping.endObject(); - IndexVersion createdVersion = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_7_0); - MapperService mapperService = createMapperService(createdVersion, mapping); - assertThat(mapperService.documentMapper().mappingSource().toString(), containsString("\"type\":\"string\"")); - assertWarnings(""" - dynamic template [my_template] has invalid content \ - [{"match_mapping_type":"string","mapping":{"type":"string"}}], attempted to validate it \ - with the following match_mapping_type: [string], last error: [No mapper found for type [string]]"""); - } - public void testTemplateWithoutMatchPredicates() throws Exception { XContentBuilder mapping = XContentFactory.jsonBuilder(); mapping.startObject(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 0b5fef2b5971c..11544d81a6914 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -9,9 +9,7 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.termvectors.TermVectorsService; -import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; @@ -80,39 +78,4 @@ public void testUsingEnabledSettingThrows() { ); } - /** - * disabling the _field_names should still work for indices before 8.0 - */ - public void testUsingEnabledBefore8() throws Exception { - 
- DocumentMapper docMapper = createDocumentMapper( - IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0), - topMapping(b -> b.startObject("_field_names").field("enabled", false).endObject()) - ); - - assertWarnings(FieldNamesFieldMapper.ENABLED_DEPRECATION_MESSAGE); - FieldNamesFieldMapper fieldNamesMapper = docMapper.metadataMapper(FieldNamesFieldMapper.class); - assertFalse(fieldNamesMapper.fieldType().isEnabled()); - - ParsedDocument doc = docMapper.parse(source(b -> b.field("field", "value"))); - assertNull(doc.rootDoc().get("_field_names")); - } - - /** - * Merging the "_field_names" enabled setting is forbidden in 8.0, but we still want to tests the behavior on pre-8 indices - */ - public void testMergingMappingsBefore8() throws Exception { - MapperService mapperService = createMapperService( - IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0), - mapping(b -> {}) - ); - - merge(mapperService, topMapping(b -> b.startObject("_field_names").field("enabled", false).endObject())); - assertFalse(mapperService.documentMapper().metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); - assertWarnings(FieldNamesFieldMapper.ENABLED_DEPRECATION_MESSAGE); - - merge(mapperService, topMapping(b -> b.startObject("_field_names").field("enabled", true).endObject())); - assertTrue(mapperService.documentMapper().metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); - assertWarnings(FieldNamesFieldMapper.ENABLED_DEPRECATION_MESSAGE); - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index a38775e76c689..1fc5b370e4614 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.core.Tuple; import 
org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.script.IpFieldScript; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; @@ -208,12 +207,6 @@ public void testNullValue() throws IOException { e.getMessage(), "Failed to parse mapping: Error parsing [null_value] on field [field]: ':1' is not an IP string literal." ); - - createDocumentMapper(IndexVersions.V_7_9_0, fieldMapping(b -> { - b.field("type", "ip"); - b.field("null_value", ":1"); - })); - assertWarnings("Error parsing [:1] as IP in [null_value] on field [field]); [null_value] will be ignored"); } public void testDimension() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 4a9791fce7496..5579a8522a450 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -26,8 +27,6 @@ import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptCompiler; -import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; @@ -547,37 +546,6 @@ public void testDeprecatedParameterName() { 
{"field":{"type":"test_mapper","fixed2":true,"required":"value"}}""", Strings.toString(mapper)); } - /** - * test parsing mapping from dynamic templates, should ignore unknown parameters for bwc and log warning before 8.0.0 - */ - public void testBWCunknownParametersfromDynamicTemplates() { - String mapping = """ - {"type":"test_mapper","some_unknown_parameter":true,"required":"value"}"""; - TestMapper mapper = fromMapping( - mapping, - IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0), - TransportVersionUtils.randomVersionBetween( - random(), - TransportVersions.V_7_0_0, - TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_0_0) - ), - true - ); - assertNotNull(mapper); - assertWarnings( - "Parameter [some_unknown_parameter] is used in a dynamic template mapping and has no effect on type [test_mapper]. " - + "Usage will result in an error in future major versions and should be removed." - ); - assertEquals(""" - {"field":{"type":"test_mapper","required":"value"}}""", Strings.toString(mapper)); - - MapperParsingException ex = expectThrows( - MapperParsingException.class, - () -> fromMapping(mapping, IndexVersions.V_8_0_0, TransportVersions.V_8_0_0, true) - ); - assertEquals("unknown parameter [some_unknown_parameter] on mapper [field] of type [test_mapper]", ex.getMessage()); - } - public void testAnalyzers() { String mapping = """ {"type":"test_mapper","analyzer":"_standard","required":"value"}"""; @@ -609,6 +577,8 @@ public void testAnalyzers() { ); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public void testDeprecatedParameters() { // 'index' is declared explicitly, 'store' is not, but is one of the previously always-accepted params String mapping = """ diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 035466d93ab06..8013a93581757 
100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -70,9 +70,6 @@ public void testMultiFieldWithinMultiField() throws IOException { Mapper.TypeParser typeParser = KeywordFieldMapper.PARSER; - // For indices created prior to 8.0, we should only emit a warning and not fail parsing. - Map fieldNode = XContentHelper.convertToMap(BytesReference.bytes(mapping), true, mapping.contentType()).v2(); - MapperService mapperService = mock(MapperService.class); IndexAnalyzers indexAnalyzers = IndexAnalyzers.of(defaultAnalyzers()); when(mapperService.getIndexAnalyzers()).thenReturn(indexAnalyzers); @@ -86,32 +83,6 @@ public void testMultiFieldWithinMultiField() throws IOException { IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY); when(mapperService.getIndexSettings()).thenReturn(indexSettings); - IndexVersion olderVersion = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); - MappingParserContext olderContext = new MappingParserContext( - null, - type -> typeParser, - type -> null, - olderVersion, - () -> TransportVersions.MINIMUM_COMPATIBLE, - null, - ScriptCompiler.NONE, - mapperService.getIndexAnalyzers(), - mapperService.getIndexSettings(), - ProvidedIdFieldMapper.NO_FIELD_DATA, - query -> { - throw new UnsupportedOperationException(); - } - ); - - TextFieldMapper.PARSER.parse("some-field", fieldNode, olderContext); - assertWarnings( - "At least one multi-field, [sub-field], " - + "was encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated " - + "and is not supported for indices created in 8.0 and later. To migrate the mappings, all instances of [fields] " - + "that occur within a [fields] block should be removed from the mappings, either by flattening the chained " - + "[fields] blocks into a single level, or switching to [copy_to] if appropriate." 
- ); - // For indices created in 8.0 or later, we should throw an error. Map fieldNodeCopy = XContentHelper.convertToMap(BytesReference.bytes(mapping), true, mapping.contentType()).v2(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java index 1df42368041ac..a3fd002e68a1c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java @@ -11,13 +11,13 @@ import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; import org.elasticsearch.script.field.vectors.BinaryDenseVectorDocValuesField; import org.elasticsearch.script.field.vectors.ByteBinaryDenseVectorDocValuesField; import org.elasticsearch.script.field.vectors.DenseVector; import org.elasticsearch.script.field.vectors.DenseVectorDocValuesField; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.nio.ByteBuffer; @@ -32,7 +32,7 @@ public void testFloatGetVectorValueAndGetMagnitude() throws IOException { float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; float[] expectedMagnitudes = { 1.7320f, 2.4495f, 3.3166f }; - for (IndexVersion indexVersion : List.of(IndexVersions.V_7_4_0, IndexVersion.current())) { + for (IndexVersion indexVersion : List.of(IndexVersionUtils.randomCompatibleVersion(random()), IndexVersion.current())) { BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, indexVersion); DenseVectorDocValuesField field = new BinaryDenseVectorDocValuesField(docValues, "test", 
ElementType.FLOAT, dims, indexVersion); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 83b8a8fa991c2..ad719e398ae37 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -1321,24 +1321,6 @@ public void testDefaultParamsIndexByDefault() throws Exception { assertEquals(VectorSimilarity.COSINE, denseVectorFieldType.getSimilarity()); } - public void testAddDocumentsToIndexBefore_V_7_5_0() throws Exception { - IndexVersion indexVersion = IndexVersions.V_7_4_0; - DocumentMapper mapper = createDocumentMapper( - indexVersion, - fieldMapping(b -> b.field("index", false).field("type", "dense_vector").field("dims", 3)) - ); - - float[] validVector = { -12.1f, 100.7f, -4 }; - ParsedDocument doc1 = mapper.parse(source(b -> b.array("field", validVector))); - List fields = doc1.rootDoc().getFields("field"); - assertEquals(1, fields.size()); - assertThat(fields.get(0), instanceOf(BinaryDocValuesField.class)); - // assert that after decoding the indexed value is equal to expected - BytesRef vectorBR = fields.get(0).binaryValue(); - float[] decodedValues = decodeDenseVector(indexVersion, vectorBR); - assertArrayEquals("Decoded dense vector values is not equal to the indexed one.", validVector, decodedValues, 0.001f); - } - public void testValidateOnBuild() { final MapperBuilderContext context = MapperBuilderContext.root(false, false); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java index 9cfbbad5ebf50..2bfd8740a971c 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.document.FeatureField; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DocumentMapper; @@ -22,11 +21,8 @@ import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperTestCase; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentType; import org.hamcrest.Matchers; import org.junit.AssumptionViolatedException; @@ -226,44 +222,6 @@ protected IndexVersion boostNotAllowedIndexVersion() { return NEW_SPARSE_VECTOR_INDEX_VERSION; } - public void testSparseVectorWith7xIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), PREVIOUS_SPARSE_VECTOR_INDEX_VERSION); - - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("my-vector") - .field("type", "sparse_vector") - .endObject() - .endObject() - .endObject() - .endObject(); - - DocumentMapper mapper = createDocumentMapper(version, builder); - assertWarnings(SparseVectorFieldMapper.ERROR_MESSAGE_7X); - - // Check that new vectors cannot be indexed. 
- int[] indexedDims = { 65535, 50, 2 }; - float[] indexedValues = { 0.5f, 1800f, -34567.11f }; - BytesReference source = BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .startObject("my-vector") - .field(Integer.toString(indexedDims[0]), indexedValues[0]) - .field(Integer.toString(indexedDims[1]), indexedValues[1]) - .field(Integer.toString(indexedDims[2]), indexedValues[2]) - .endObject() - .endObject() - ); - - DocumentParsingException indexException = expectThrows( - DocumentParsingException.class, - () -> mapper.parse(new SourceToParse("id", source, XContentType.JSON)) - ); - assertThat(indexException.getCause().getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE_7X)); - } - public void testSparseVectorUnsupportedIndex() throws Exception { IndexVersion version = IndexVersionUtils.randomVersionBetween( random(), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoderTests.java index 80b08f907be8d..9e581aa7711ef 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoderTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; @@ -27,7 +28,7 @@ public void testVectorDecodingWithOffset() { for (IndexVersion version : List.of( IndexVersionUtils.randomVersionBetween( random(), - DenseVectorFieldMapper.MAGNITUDE_STORED_INDEX_VERSION, + IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.getPreviousVersion(DenseVectorFieldMapper.LITTLE_ENDIAN_FLOAT_STORED_INDEX_VERSION) ), DenseVectorFieldMapper.LITTLE_ENDIAN_FLOAT_STORED_INDEX_VERSION diff --git 
a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 278d4ae505bdc..48e8f0ef11676 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -373,7 +373,7 @@ public void testParseFailsWithTermsArray() { "message1" : ["term1", "term2"] } }"""; - expectThrows(IllegalStateException.class, () -> parseQuery(json2)); + expectThrows(IllegalArgumentException.class, () -> parseQuery(json2)); } public void testExceptionUsingAnalyzerOnNumericField() { diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index a003436fc0523..f45b349d0770b 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -73,7 +73,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.V_7_0_0, negativeScoresSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -98,7 +98,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.V_7_0_0, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -123,7 +123,7 @@ public float 
score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(IndexVersions.V_7_0_0, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(IndexVersions.MINIMUM_COMPATIBLE, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java index 00a657b7fdcf1..dce54b11b720c 100644 --- a/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/server/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -16,6 +16,7 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Random; import static org.hamcrest.Matchers.equalTo; @@ -75,7 +76,123 @@ protected InputStream openSlice(int slice) throws IOException { for (int i = 0; i < streams.length; i++) { assertTrue(streams[i].closed); } + } + + public void testRandomMarkReset() throws IOException { + final int slices = randomIntBetween(1, 20); + final var bytes = randomByteArrayOfLength(randomIntBetween(1000, 10000)); + final int sliceSize = bytes.length / slices; + + final var streamsOpened = new ArrayList(); + SlicedInputStream input = new SlicedInputStream(slices) { + @Override + protected InputStream openSlice(int slice) throws IOException { + final int sliceOffset = slice * sliceSize; + final int length = slice == slices - 1 ? 
bytes.length - sliceOffset : sliceSize; + final var stream = new CheckClosedInputStream(new ByteArrayInputStream(bytes, sliceOffset, length)); + streamsOpened.add(stream); + return stream; + } + }; + + // Read up to a random point + final int mark = randomIntBetween(0, bytes.length); + if (mark > 0) { + final var bytesReadUntilMark = new byte[mark]; + input.readNBytes(bytesReadUntilMark, 0, mark); + final var expectedBytesUntilMark = new ByteArrayInputStream(bytes, 0, mark).readAllBytes(); + assertArrayEquals(expectedBytesUntilMark, bytesReadUntilMark); + } + + // Reset should throw since there is no mark + expectThrows(IOException.class, input::reset); + + // Mark + input.mark(randomNonNegativeInt()); + + // Read up to another random point + final int moreBytes = randomIntBetween(0, bytes.length - mark); + if (moreBytes > 0) { + final var moreBytesRead = new byte[moreBytes]; + input.readNBytes(moreBytesRead, 0, moreBytes); + final var expectedMoreBytes = new ByteArrayInputStream(bytes, mark, moreBytes).readAllBytes(); + assertArrayEquals(expectedMoreBytes, moreBytesRead); + } + + // Reset + input.reset(); + + // Read all remaining bytes, which should be the bytes from mark up to the end + final int remainingBytes = bytes.length - mark; + if (remainingBytes > 0) { + final var remainingBytesRead = new byte[remainingBytes]; + input.readNBytes(remainingBytesRead, 0, remainingBytes); + final var expectedRemainingBytes = new ByteArrayInputStream(bytes, mark, remainingBytes).readAllBytes(); + assertArrayEquals(expectedRemainingBytes, remainingBytesRead); + } + + // Confirm we reached the end and close the stream + assertThat(input.read(), equalTo(-1)); + input.close(); + streamsOpened.forEach(stream -> assertTrue(stream.closed)); + } + + public void testMarkResetClosedStream() throws IOException { + final int slices = randomIntBetween(1, 20); + SlicedInputStream input = new SlicedInputStream(slices) { + @Override + protected InputStream openSlice(int slice) throws 
IOException { + return new ByteArrayInputStream(new byte[] { 0 }, 0, 1); + } + }; + + input.skip(randomIntBetween(1, slices)); + input.mark(randomNonNegativeInt()); + input.close(); + // SlicedInputStream supports reading -1 after close without throwing + assertThat(input.read(), equalTo(-1)); + expectThrows(IOException.class, input::reset); + assertThat(input.read(), equalTo(-1)); + input.mark(randomNonNegativeInt()); + assertThat(input.read(), equalTo(-1)); + } + + public void testMarkResetUnsupportedStream() throws IOException { + final int slices = randomIntBetween(1, 20); + SlicedInputStream input = new SlicedInputStream(slices) { + @Override + protected InputStream openSlice(int slice) throws IOException { + return new ByteArrayInputStream(new byte[] { 0 }, 0, 1); + } + + @Override + public boolean markSupported() { + return false; + } + }; + input.mark(randomNonNegativeInt()); + expectThrows(IOException.class, input::reset); + input.close(); + } + + public void testMarkResetZeroSlices() throws IOException { + SlicedInputStream input = new SlicedInputStream(0) { + @Override + protected InputStream openSlice(int slice) throws IOException { + throw new AssertionError("should not be called"); + } + }; + + if (randomBoolean()) { + // randomly initialize the stream + assertThat(input.read(), equalTo(-1)); + } + + input.mark(randomNonNegativeInt()); + input.reset(); + assertThat(input.read(), equalTo(-1)); + input.close(); } private int readFully(InputStream stream, byte[] buffer) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 628ff4b99b133..686acc367ade5 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.indices; +import org.elasticsearch.core.UpdateForV9; import 
org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; @@ -96,6 +97,8 @@ public Map getMetadataMappers() { DataStreamTimestampFieldMapper.NAME, FieldNamesFieldMapper.NAME }; + @UpdateForV9 + @AwaitsFix(bugUrl = "test is referencing 7.x index versions so needs to be updated for 9.0 bump") public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index c4d6cb6be502d..ff65464085c03 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.IndexService.IndexCreationContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.Analysis; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.CharFilterFactory; @@ -187,34 +186,6 @@ public void testUnderscoreInAnalyzerName() throws IOException { } } - public void testStandardFilterBWC() throws IOException { - // standard tokenfilter should have been removed entirely in the 7x line. 
However, a - // cacheing bug meant that it was still possible to create indexes using a standard - // filter until 7.6 - { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_6_0, IndexVersion.current()); - final Settings settings = Settings.builder() - .put("index.analysis.analyzer.my_standard.tokenizer", "standard") - .put("index.analysis.analyzer.my_standard.filter", "standard") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetadata.SETTING_VERSION_CREATED, version) - .build(); - IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> getIndexAnalyzers(settings)); - assertThat(exc.getMessage(), equalTo("The [standard] token filter has been removed.")); - } - { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2); - final Settings settings = Settings.builder() - .put("index.analysis.analyzer.my_standard.tokenizer", "standard") - .put("index.analysis.analyzer.my_standard.filter", "standard") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetadata.SETTING_VERSION_CREATED, version) - .build(); - getIndexAnalyzers(settings); - assertWarnings("The [standard] token filter is deprecated and will be removed in a future version."); - } - } - /** * Tests that plugins can register pre-configured char filters that vary in behavior based on Elasticsearch version, Lucene version, * and that do not vary based on version at all. 
diff --git a/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java b/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java index acefa2958ea0a..796a16c00da58 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ParsedMediaType; import org.hamcrest.CustomTypeSafeMatcher; @@ -163,6 +164,8 @@ public void testAcceptAndContentTypeCombinations() { assertThat(requestWith(acceptHeader(null), contentTypeHeader("application/json"), bodyPresent()), not(isCompatible())); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this can be re-enabled once our rest api version is bumped to V_9") public void testObsoleteVersion() { ElasticsearchStatusException e = expectThrows( ElasticsearchStatusException.class, diff --git a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java index 8bd53047b2dc7..78e1d8f8440a9 100644 --- a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java @@ -45,11 +45,15 @@ public void testFloatVectorClassBindings() throws IOException { List fields = List.of( new BinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersions.V_7_4_0), + BinaryDenseVectorScriptDocValuesTests.wrap( + new float[][] { docVector }, + ElementType.FLOAT, + IndexVersions.MINIMUM_COMPATIBLE + ), "test", ElementType.FLOAT, dims, - IndexVersions.V_7_4_0 + IndexVersions.MINIMUM_COMPATIBLE ), new 
BinaryDenseVectorDocValuesField( BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), @@ -238,11 +242,15 @@ public void testByteVsFloatSimilarity() throws IOException { List fields = List.of( new BinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersions.V_7_4_0), + BinaryDenseVectorScriptDocValuesTests.wrap( + new float[][] { docVector }, + ElementType.FLOAT, + IndexVersions.MINIMUM_COMPATIBLE + ), "field0", ElementType.FLOAT, dims, - IndexVersions.V_7_4_0 + IndexVersions.MINIMUM_COMPATIBLE ), new BinaryDenseVectorDocValuesField( BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), diff --git a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java index 2be338efd7174..d06593d0349ba 100644 --- a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java +++ b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java @@ -68,7 +68,7 @@ public void testFloatVsListQueryVector() { assertEquals(knn.cosineSimilarity(arrayQV), knn.cosineSimilarity(listQV), 0.001f); assertEquals(knn.cosineSimilarity((Object) listQV), knn.cosineSimilarity((Object) arrayQV), 0.001f); - for (IndexVersion indexVersion : List.of(IndexVersions.V_7_4_0, IndexVersion.current())) { + for (IndexVersion indexVersion : List.of(IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current())) { BytesRef value = BinaryDenseVectorScriptDocValuesTests.mockEncodeDenseVector(docVector, ElementType.FLOAT, indexVersion); BinaryDenseVector bdv = new BinaryDenseVector(docVector, value, dims, indexVersion); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 86aaa66b85bd5..c922feeb9f660 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -507,22 +507,6 @@ public final void testMeta() throws IOException { ); } - public final void testDeprecatedBoostWarning() throws IOException { - try { - createMapperService(DEPRECATED_BOOST_INDEX_VERSION, fieldMapping(b -> { - minimalMapping(b, DEPRECATED_BOOST_INDEX_VERSION); - b.field("boost", 2.0); - })); - String[] warnings = Strings.concatStringArrays( - getParseMinimalWarnings(DEPRECATED_BOOST_INDEX_VERSION), - new String[] { "Parameter [boost] on field [field] is deprecated and has no effect" } - ); - assertWarnings(warnings); - } catch (MapperParsingException e) { - assertThat(e.getMessage(), anyOf(containsString("Unknown parameter [boost]"), containsString("[boost : 2.0]"))); - } - } - public void testBoostNotAllowed() throws IOException { MapperParsingException e = expectThrows( MapperParsingException.class, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java index 1b00ba3e9fd09..dd3c59e8fc365 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -142,10 +143,13 @@ public final void testFixedMetaFieldsAreNotConfigurable() throws IOException { assertEquals("Failed 
to parse mapping: " + fieldName() + " is not configurable", exception.getMessage()); } + @UpdateForV9 + // This was previously testing for index versions between 7.0.0 and 8.6.0 but has been bumped to 8.0.0 - 8.6.0 + // Verify this is the correct action. public void testTypeAndFriendsAreAcceptedBefore_8_6_0() throws IOException { assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_6_0); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, previousVersion); + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, previousVersion); assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); MapperService mapperService = createMapperService(version, mapping(b -> {})); // these parameters were previously silently ignored, they will still be ignored in existing indices diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index ad738d8985e03..728e1ca73ded0 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -364,12 +365,14 @@ protected static Settings.Builder indexSettingsNoReplicas(int shards) { /** * Randomly write an empty snapshot of an older version to an empty 
repository to simulate an older repository metadata format. */ + @UpdateForV9 + // This used to pick an index version from 7.0.0 to 8.9.0. The minimum now is 8.0.0 but it's not clear what the upper range should be protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws Exception { if (randomBoolean() && randomBoolean()) { initWithSnapshotVersion( repoName, repoPath, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_8_9_0) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_9_0) ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index ea632599fedbf..3385cb1eb2e7e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -36,6 +36,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Objects; import java.util.Random; import java.util.Set; @@ -91,7 +92,7 @@ public void wipe(Set excludeTemplates) { l.delegateResponse((ll, e) -> { // Ignore if action isn't registered, because data streams is a module and // if the delete action isn't registered then there no data streams to delete. 
- if (e.getMessage().startsWith("failed to find action") == false) { + if (Objects.requireNonNullElse(e.getMessage(), "").startsWith("failed to find action") == false) { ll.onFailure(e); } else { ll.onResponse(AcknowledgedResponse.TRUE); diff --git a/x-pack/plugin/autoscaling/qa/rest/build.gradle b/x-pack/plugin/autoscaling/qa/rest/build.gradle index 19254880a7089..c79644ee31225 100644 --- a/x-pack/plugin/autoscaling/qa/rest/build.gradle +++ b/x-pack/plugin/autoscaling/qa/rest/build.gradle @@ -13,12 +13,6 @@ restResources { } } -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - "autoscaling/get_autoscaling_capacity/Test get fixed autoscaling capacity", - ].join(',') -} - testClusters.configureEach { testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 09ffae9b24be1..4fdc4c3af4190 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -22,8 +22,7 @@ dependencies { // let the yamlRestTests see the classpath of test GradleUtils.extendSourceSet(project, "test", "yamlRestTest", tasks.named("yamlRestTest")) -int compatVersion = VersionProperties.getElasticsearchVersion().getMajor() - 1; -GradleUtils.extendSourceSet(project, "test", "yamlRestTestV${compatVersion}Compat") +GradleUtils.extendSourceSet(project, "test", "yamlRestCompatTest") restResources { restApi { @@ -59,121 +58,6 @@ tasks.named("yamlRestTest").configure { systemProperty 'tests.rest.blacklist', restTestBlacklist.join(',') } -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'unsigned_long/50_script_values/Scripted sort values', - 'unsigned_long/50_script_values/script_score query', - 'unsigned_long/50_script_values/Script query', - 'aggregate-metrics/90_tsdb_mappings/aggregate_double_metric with time series mappings', - 'aggregate-metrics/90_tsdb_mappings/aggregate_double_metric with wrong time series 
mappings', - 'analytics/histogram/histogram with wrong time series mappings', - 'analytics/histogram/histogram with time series mappings', - 'analytics/boxplot/Basic Search', - 'analytics/boxplot/Search With Runtime Field', - 'analytics/boxplot/Search With Missing', - 'analytics/moving_percentile/Basic Search TDigest', - 'ml/evaluate_data_frame/Test outlier_detection auc_roc', - 'ml/evaluate_data_frame/Test outlier_detection auc_roc given actual_field is int', - 'ml/evaluate_data_frame/Test outlier_detection auc_roc include curve', - 'ml/evaluate_data_frame/Test classification auc_roc', - 'ml/evaluate_data_frame/Test classification auc_roc with default top_classes_field', - ].join(',') -} - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest( - "vectors/10_dense_vector_basic/Deprecated function signature", - "to support it, it would require to almost revert back the #48725 and complicate the code" - ) - task.skipTest( - "vectors/20_dense_vector_special_cases/Indexing of Dense vectors should error when dims don't match defined in the mapping", - "Error message has changed" - ) - task.skipTest("vectors/30_sparse_vector_basic/Cosine Similarity", "not supported for compatibility") - task.skipTest("vectors/30_sparse_vector_basic/Deprecated function signature", "not supported for compatibility") - task.skipTest("vectors/30_sparse_vector_basic/Dot Product", "not supported for compatibility") - task.skipTest("vectors/35_sparse_vector_l1l2/L1 norm", "not supported for compatibility") - task.skipTest("vectors/35_sparse_vector_l1l2/L2 norm", "not supported for compatibility") - task.skipTest("vectors/40_sparse_vector_special_cases/Dimensions can be sorted differently", "not supported for compatibility") - task.skipTest("vectors/40_sparse_vector_special_cases/Documents missing a vector field", "not supported for compatibility") - task.skipTest( - "vectors/40_sparse_vector_special_cases/Query vector has different dimensions from documents' 
vectors", - "not supported for compatibility" - ) - task.skipTest("vectors/40_sparse_vector_special_cases/Sparse vectors should error with dense vector functions", "not supported for compatibility") - task.skipTest("vectors/40_sparse_vector_special_cases/Vectors of different dimensions and data types", "not supported for compatibility") - task.skipTest("vectors/50_vector_stats/Usage stats on vector fields", "not supported for compatibility") - task.skipTest( - "roles/30_prohibited_role_query/Test use prohibited query inside role query", - "put role request with a term lookup (deprecated) and type. Requires validation in REST layer" - ) - task.skipTest("ml/jobs_crud/Test update job", "Behaviour change #89824 - added limit filter to categorization analyzer") - task.skipTest("ml/jobs_crud/Test create job with delimited format", "removing undocumented functionality") - task.skipTest("ml/jobs_crud/Test cannot create job with model snapshot id set", "Exception type has changed.") - task.skipTest("ml/validate/Test job config is invalid because model snapshot id set", "Exception type has changed.") - task.skipTest("ml/datafeeds_crud/Test update datafeed to point to missing job", "behaviour change #44752 - not allowing to update datafeed job_id") - task.skipTest( - "ml/datafeeds_crud/Test update datafeed to point to different job", - "behaviour change #44752 - not allowing to update datafeed job_id" - ) - task.skipTest( - "ml/datafeeds_crud/Test update datafeed to point to job already attached to another datafeed", - "behaviour change #44752 - not allowing to update datafeed job_id" - ) - task.skipTest( - "ml/trained_model_cat_apis/Test cat trained models", - "A type field was added to cat.ml_trained_models #73660, this is a backwards compatible change. Still this is a cat api, and we don't support them with rest api compatibility. 
(the test would be very hard to transform too)" - ) - task.skipTest( - "ml/categorization_agg/Test categorization agg simple", - "categorize_text was changed in 8.3, but experimental prior to the change" - ) - task.skipTest( - "ml/categorization_agg/Test categorization aggregation against unsupported field", - "categorize_text was changed in 8.3, but experimental prior to the change" - ) - task.skipTest( - "ml/categorization_agg/Test categorization aggregation with poor settings", - "categorize_text was changed in 8.3, but experimental prior to the change" - ) - task.skipTest("indices.freeze/30_usage/Usage stats on frozen indices", "#70192 -- the freeze index API is removed from 8.0") - task.skipTest("indices.freeze/20_stats/Translog stats on frozen indices", "#70192 -- the freeze index API is removed from 8.0") - task.skipTest("indices.freeze/10_basic/Basic", "#70192 -- the freeze index API is removed from 8.0") - task.skipTest("indices.freeze/10_basic/Test index options", "#70192 -- the freeze index API is removed from 8.0") - task.skipTest("sql/sql/Paging through results", "scrolling through search hit queries no longer produces empty last page in 8.2") - task.skipTest("sql/translate/Translate SQL", "query folding changed in v 8.5, added track_total_hits: -1") - task.skipTest("service_accounts/10_basic/Test get service accounts", "new service accounts are added") - task.skipTest("spatial/70_script_doc_values/diagonal length", "precision changed in 8.4.0") - task.skipTest("spatial/70_script_doc_values/geoshape value", "error message changed in 8.9.0") - task.skipTest("security/authz/14_cat_indices/Test empty request while single authorized index", "not supported for compatibility") - task.skipTestsByFilePattern("**/rollup/**", "The rollup yaml tests in the 7.x branch don't know how to fake a cluster with rollup usage") - task.skipTest("data_stream/10_basic/Create hidden data stream", "warning does not exist for compatibility") - - 
task.replaceValueInMatch("_type", "_doc") - task.addAllowedWarningRegex("\\[types removal\\].*") - task.addAllowedWarningRegexForTest("Including \\[accept_enterprise\\] in get license.*", "Installing enterprise license") - task.addAllowedWarningRegex("bucket_span .* is not an integral .* of the number of seconds in 1d.* This is now deprecated.*") - - task.replaceValueTextByKeyValue( - "catch", - 'bad_request', - '/It is no longer possible to freeze indices, but existing frozen indices can still be unfrozen/', - "Cannot freeze write index for data stream" - ) - - task.replaceValueInMatch( - "error.reason", - "action [cluster:admin/xpack/security/api_key/invalidate] is unauthorized for user [api_key_user_1] with effective roles [user_role], this action is granted by the cluster privileges [manage_api_key,manage_security,all]", - "Test invalidate api key by realm name" - ) - - task.replaceValueInMatch( - "error.reason", - "action [cluster:admin/xpack/security/api_key/invalidate] is unauthorized for user [api_key_user_1] with effective roles [user_role], this action is granted by the cluster privileges [manage_api_key,manage_security,all]", - "Test invalidate api key by username" - ) -} - tasks.register('enforceApiSpecsConvention').configure { def mainApiSpecs = fileTree('src/test/resources/rest-api-spec/api') doLast { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 467ef3c68f648..008792966a4b2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import 
org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -1812,6 +1813,8 @@ void updateAutoFollowMetadata(Function updateFunctio assertThat(counter.get(), equalTo(states.length)); } + @UpdateForV9 + @AwaitsFix(bugUrl = "ability to disable soft deletes was removed in 8.0 indexes so we can probably remove this test") public void testAutoFollowerSoftDeletesDisabled() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString(), any(), any())).thenReturn(new RedirectToLocalClusterRemoteClusterClient(client)); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java index 996bc0eff5c1c..a08eb935178cf 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportActionIT.java @@ -14,12 +14,14 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAlias; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; @@ 
-34,11 +36,13 @@ import org.elasticsearch.xpack.core.XPackClientPlugin; import org.junit.After; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; @@ -64,24 +68,58 @@ private void cleanup() throws Exception { return clusterStateBuilder.build(); }); updateClusterSettings( - Settings.builder().put(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getKey(), (String) null) + Settings.builder() + .putNull(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getKey()) + .putNull(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey()) ); } @SuppressWarnings("unchecked") public void testAction() throws Exception { - assertUsageResults(0, 0, 0, 0.0, true); - AtomicLong totalCount = new AtomicLong(0); - AtomicLong countLifecycleWithRetention = new AtomicLong(0); + // test empty results + { + Map map = getLifecycleUsage(); + assertThat(map.get("available"), equalTo(true)); + assertThat(map.get("enabled"), equalTo(true)); + assertThat(map.get("count"), equalTo(0)); + assertThat(map.get("default_rollover_used"), equalTo(true)); + + Map dataRetentionMap = (Map) map.get("data_retention"); + assertThat(dataRetentionMap.size(), equalTo(1)); + assertThat(dataRetentionMap.get("configured_data_streams"), equalTo(0)); + + Map effectiveRetentionMap = (Map) map.get("effective_retention"); + assertThat(effectiveRetentionMap.size(), equalTo(1)); + assertThat(effectiveRetentionMap.get("retained_data_streams"), equalTo(0)); + + Map globalRetentionMap = (Map) map.get("global_retention"); + assertThat(globalRetentionMap.get("max"), equalTo(Map.of("defined", false))); + assertThat(globalRetentionMap.get("default"), equalTo(Map.of("defined", false))); + } + + // Keep track of the data streams created + 
AtomicInteger dataStreamsWithLifecycleCount = new AtomicInteger(0); + AtomicInteger dataStreamsWithRetentionCount = new AtomicInteger(0); + AtomicInteger dataStreamsWithDefaultRetentionCount = new AtomicInteger(0); + AtomicLong totalRetentionTimes = new AtomicLong(0); AtomicLong minRetention = new AtomicLong(Long.MAX_VALUE); AtomicLong maxRetention = new AtomicLong(Long.MIN_VALUE); + boolean useDefaultRolloverConfig = randomBoolean(); if (useDefaultRolloverConfig == false) { updateClusterSettings( Settings.builder().put(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getKey(), "min_docs=33") ); } + TimeValue defaultRetention = TimeValue.timeValueDays(10); + boolean useDefaultRetention = randomBoolean(); + if (useDefaultRetention) { + updateClusterSettings( + Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), defaultRetention.getStringRep()) + ); + } /* * We now add a number of simulated data streams to the cluster state. Some have lifecycles, some don't. The ones with lifecycles * have varying retention periods. After adding them, we make sure the numbers add up. 
@@ -89,19 +127,25 @@ public void testAction() throws Exception { updateClusterState(clusterState -> { Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); Map dataStreamMap = new HashMap<>(); - for (int dataStreamCount = 0; dataStreamCount < randomInt(200); dataStreamCount++) { - boolean hasLifecycle = randomBoolean(); + boolean atLeastOne = false; + for (int dataStreamCount = 0; dataStreamCount < randomIntBetween(1, 200); dataStreamCount++) { + boolean hasLifecycle = randomBoolean() || atLeastOne == false; DataStreamLifecycle lifecycle; + boolean systemDataStream = rarely(); if (hasLifecycle) { if (randomBoolean()) { lifecycle = new DataStreamLifecycle(null, null, null); - totalCount.incrementAndGet(); + dataStreamsWithLifecycleCount.incrementAndGet(); + if (useDefaultRetention && systemDataStream == false) { + dataStreamsWithDefaultRetentionCount.incrementAndGet(); + } + atLeastOne = true; } else { long retentionMillis = randomLongBetween(1000, 100000); - boolean isEnabled = randomBoolean(); + boolean isEnabled = randomBoolean() || atLeastOne == false; if (isEnabled) { - totalCount.incrementAndGet(); - countLifecycleWithRetention.incrementAndGet(); + dataStreamsWithLifecycleCount.incrementAndGet(); + dataStreamsWithRetentionCount.incrementAndGet(); totalRetentionTimes.addAndGet(retentionMillis); if (retentionMillis < minRetention.get()) { @@ -110,6 +154,7 @@ public void testAction() throws Exception { if (retentionMillis > maxRetention.get()) { maxRetention.set(retentionMillis); } + atLeastOne = true; } lifecycle = DataStreamLifecycle.newBuilder().dataRetention(retentionMillis).enabled(isEnabled).build(); } @@ -121,7 +166,6 @@ public void testAction() throws Exception { Index index = new Index(randomAlphaOfLength(60), randomAlphaOfLength(60)); indices.add(index); } - boolean systemDataStream = randomBoolean(); boolean replicated = randomBoolean(); DataStream dataStream = new DataStream( randomAlphaOfLength(50), @@ -147,28 +191,59 @@ 
public void testAction() throws Exception { clusterStateBuilder.metadata(metadataBuilder); return clusterStateBuilder.build(); }); - int expectedMinimumRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue(); - int expectedMaximumRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue(); - double expectedAverageRetention = countLifecycleWithRetention.get() == 0 + + int retainedDataStreams = dataStreamsWithRetentionCount.get() + dataStreamsWithDefaultRetentionCount.get(); + + int expectedMinimumDataRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue(); + int expectedMinimumEffectiveRetention = dataStreamsWithDefaultRetentionCount.get() > 0 + ? (int) Math.min(minRetention.get(), defaultRetention.getMillis()) + : expectedMinimumDataRetention; + + int expectedMaximumDataRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue(); + int expectedMaximumEffectiveRetention = dataStreamsWithDefaultRetentionCount.get() > 0 + ? (int) Math.max(maxRetention.get(), defaultRetention.getMillis()) + : expectedMaximumDataRetention; + + double expectedAverageDataRetention = dataStreamsWithRetentionCount.get() == 0 ? 0.0 - : totalRetentionTimes.doubleValue() / countLifecycleWithRetention.get(); - assertUsageResults( - totalCount.intValue(), - expectedMinimumRetention, - expectedMaximumRetention, - expectedAverageRetention, - useDefaultRolloverConfig - ); + : totalRetentionTimes.doubleValue() / dataStreamsWithRetentionCount.get(); + double expectedAverageEffectiveRetention = dataStreamsWithDefaultRetentionCount.get() > 0 + ? 
(totalRetentionTimes.doubleValue() + dataStreamsWithDefaultRetentionCount.get() * defaultRetention.getMillis()) + / retainedDataStreams + : expectedAverageDataRetention; + + Map map = getLifecycleUsage(); + assertThat(map.get("available"), equalTo(true)); + assertThat(map.get("enabled"), equalTo(true)); + assertThat(map.get("count"), equalTo(dataStreamsWithLifecycleCount.intValue())); + assertThat(map.get("default_rollover_used"), equalTo(useDefaultRolloverConfig)); + + Map dataRetentionMap = (Map) map.get("data_retention"); + assertThat(dataRetentionMap.get("configured_data_streams"), equalTo(dataStreamsWithRetentionCount.get())); + if (dataStreamsWithRetentionCount.get() > 0) { + assertThat(dataRetentionMap.get("minimum_millis"), equalTo(expectedMinimumDataRetention)); + assertThat(dataRetentionMap.get("maximum_millis"), equalTo(expectedMaximumDataRetention)); + assertThat(dataRetentionMap.get("average_millis"), equalTo(expectedAverageDataRetention)); + } + + Map effectiveRetentionMap = (Map) map.get("effective_retention"); + assertThat(effectiveRetentionMap.get("retained_data_streams"), equalTo(retainedDataStreams)); + if (retainedDataStreams > 0) { + assertThat(effectiveRetentionMap.get("minimum_millis"), equalTo(expectedMinimumEffectiveRetention)); + assertThat(effectiveRetentionMap.get("maximum_millis"), equalTo(expectedMaximumEffectiveRetention)); + assertThat(effectiveRetentionMap.get("average_millis"), equalTo(expectedAverageEffectiveRetention)); + } + + Map> globalRetentionMap = (Map>) map.get("global_retention"); + assertThat(globalRetentionMap.get("max").get("defined"), equalTo(false)); + assertThat(globalRetentionMap.get("default").get("defined"), equalTo(useDefaultRetention)); + if (useDefaultRetention) { + assertThat(globalRetentionMap.get("default").get("affected_data_streams"), equalTo(dataStreamsWithDefaultRetentionCount.get())); + assertThat(globalRetentionMap.get("default").get("retention_millis"), equalTo((int) defaultRetention.getMillis())); + } + }
- @SuppressWarnings("unchecked") - private void assertUsageResults( - int count, - int minimumRetention, - int maximumRetention, - double averageRetention, - boolean defaultRolloverUsed - ) throws Exception { + private Map getLifecycleUsage() throws IOException { XPackUsageFeatureResponse response = safeGet(client().execute(DATA_STREAM_LIFECYCLE, new XPackUsageRequest(SAFE_AWAIT_TIMEOUT))); XContentBuilder builder = XContentFactory.jsonBuilder(); builder = response.getUsage().toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -177,17 +252,7 @@ private void assertUsageResults( true, XContentType.JSON ); - - Map map = tuple.v2(); - assertThat(map.get("available"), equalTo(true)); - assertThat(map.get("enabled"), equalTo(true)); - assertThat(map.get("count"), equalTo(count)); - assertThat(map.get("default_rollover_used"), equalTo(defaultRolloverUsed)); - Map retentionMap = (Map) map.get("retention"); - assertThat(retentionMap.size(), equalTo(3)); - assertThat(retentionMap.get("minimum_millis"), equalTo(minimumRetention)); - assertThat(retentionMap.get("maximum_millis"), equalTo(maximumRetention)); - assertThat(retentionMap.get("average_millis"), equalTo(averageRetention)); + return tuple.v2(); } /* diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java index 657a13aa0f00c..e74d1bb0ad647 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataStreamLifecycleUsageTransportAction.java @@ -12,9 +12,12 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import 
org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -24,17 +27,22 @@ import org.elasticsearch.xpack.core.datastreams.DataStreamLifecycleFeatureSetUsage; import java.util.Collection; +import java.util.HashMap; import java.util.LongSummaryStatistics; +import java.util.Map; public class DataStreamLifecycleUsageTransportAction extends XPackUsageFeatureTransportAction { + private final DataStreamGlobalRetentionSettings globalRetentionSettings; + @Inject public DataStreamLifecycleUsageTransportAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( XPackUsageFeatureAction.DATA_STREAM_LIFECYCLE.name(), @@ -44,6 +52,7 @@ public DataStreamLifecycleUsageTransportAction( actionFilters, indexNameExpressionResolver ); + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -54,42 +63,92 @@ protected void masterOperation( ActionListener listener ) { final Collection dataStreams = state.metadata().dataStreams().values(); - Tuple stats = calculateStats(dataStreams); - - long minRetention = stats.v2().getCount() == 0 ? 0 : stats.v2().getMin(); - long maxRetention = stats.v2().getCount() == 0 ? 
0 : stats.v2().getMax(); - double averageRetention = stats.v2().getAverage(); - RolloverConfiguration rolloverConfiguration = clusterService.getClusterSettings() - .get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING); - String rolloverConfigString = rolloverConfiguration.toString(); - final DataStreamLifecycleFeatureSetUsage.LifecycleStats lifecycleStats = new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - stats.v1(), - minRetention, - maxRetention, - averageRetention, - DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getDefault(null).toString().equals(rolloverConfigString) + DataStreamLifecycleFeatureSetUsage.LifecycleStats lifecycleStats = calculateStats( + dataStreams, + clusterService.getClusterSettings().get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), + globalRetentionSettings.get() ); - - final DataStreamLifecycleFeatureSetUsage usage = new DataStreamLifecycleFeatureSetUsage(lifecycleStats); - listener.onResponse(new XPackUsageFeatureResponse(usage)); + listener.onResponse(new XPackUsageFeatureResponse(new DataStreamLifecycleFeatureSetUsage(lifecycleStats))); } /** - * Counts the number of data streams that have a lifecycle configured (and enabled) and for - * the data streams that have a lifecycle it computes the min/max/average summary of the effective - * configured retention. + * Counts the number of data streams that have a lifecycle configured (and enabled), + * computes the min/max/average summary of the data and effective retention and tracks the usage of global retention. 
*/ - public static Tuple calculateStats(Collection dataStreams) { + public static DataStreamLifecycleFeatureSetUsage.LifecycleStats calculateStats( + Collection dataStreams, + RolloverConfiguration rolloverConfiguration, + DataStreamGlobalRetention globalRetention + ) { + // Initialise counters of associated data streams long dataStreamsWithLifecycles = 0; - LongSummaryStatistics retentionStats = new LongSummaryStatistics(); + long dataStreamsWithDefaultRetention = 0; + long dataStreamsWithMaxRetention = 0; + + LongSummaryStatistics dataRetentionStats = new LongSummaryStatistics(); + LongSummaryStatistics effectiveRetentionStats = new LongSummaryStatistics(); + for (DataStream dataStream : dataStreams) { if (dataStream.getLifecycle() != null && dataStream.getLifecycle().isEnabled()) { dataStreamsWithLifecycles++; + // Track data retention if (dataStream.getLifecycle().getDataStreamRetention() != null) { - retentionStats.accept(dataStream.getLifecycle().getDataStreamRetention().getMillis()); + dataRetentionStats.accept(dataStream.getLifecycle().getDataStreamRetention().getMillis()); + } + // Track effective retention + Tuple effectiveDataRetentionWithSource = dataStream.getLifecycle() + .getEffectiveDataRetentionWithSource(globalRetention, dataStream.isInternal()); + + // Track global retention usage + if (effectiveDataRetentionWithSource.v1() != null) { + effectiveRetentionStats.accept(effectiveDataRetentionWithSource.v1().getMillis()); + if (effectiveDataRetentionWithSource.v2().equals(DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION)) { + dataStreamsWithDefaultRetention++; + } + if (effectiveDataRetentionWithSource.v2().equals(DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION)) { + dataStreamsWithMaxRetention++; + } } } } - return new Tuple<>(dataStreamsWithLifecycles, retentionStats); + Map globalRetentionStats = getGlobalRetentionStats( + globalRetention, + dataStreamsWithDefaultRetention, + dataStreamsWithMaxRetention + ); + return new 
DataStreamLifecycleFeatureSetUsage.LifecycleStats( + dataStreamsWithLifecycles, + DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getDefault(null).equals(rolloverConfiguration), + DataStreamLifecycleFeatureSetUsage.RetentionStats.create(dataRetentionStats), + DataStreamLifecycleFeatureSetUsage.RetentionStats.create(effectiveRetentionStats), + globalRetentionStats + ); + } + + private static Map getGlobalRetentionStats( + DataStreamGlobalRetention globalRetention, + long dataStreamsWithDefaultRetention, + long dataStreamsWithMaxRetention + ) { + if (globalRetention == null) { + return Map.of(); + } + Map globalRetentionStats = new HashMap<>(); + if (globalRetention.defaultRetention() != null) { + globalRetentionStats.put( + DataStreamLifecycleFeatureSetUsage.LifecycleStats.DEFAULT_RETENTION_FIELD_NAME, + new DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats( + dataStreamsWithDefaultRetention, + globalRetention.defaultRetention() + ) + ); + } + if (globalRetention.maxRetention() != null) { + globalRetentionStats.put( + DataStreamLifecycleFeatureSetUsage.LifecycleStats.MAX_RETENTION_FIELD_NAME, + new DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats(dataStreamsWithMaxRetention, globalRetention.maxRetention()) + ); + } + return globalRetentionStats; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java index 91cce4126d3a3..4c550c69e4c09 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java @@ -13,11 +13,15 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; +import java.util.LongSummaryStatistics; +import java.util.Map; import java.util.Objects; public class DataStreamLifecycleFeatureSetUsage extends XPackFeatureSet.Usage { @@ -55,13 +59,7 @@ public TransportVersion getMinimalSupportedVersion() { protected void innerXContent(XContentBuilder builder, Params params) throws IOException { super.innerXContent(builder, params); if (enabled) { - builder.field("count", lifecycleStats.dataStreamsWithLifecyclesCount); - builder.field("default_rollover_used", lifecycleStats.defaultRolloverUsed); - builder.startObject("retention"); - builder.field("minimum_millis", lifecycleStats.minRetentionMillis); - builder.field("maximum_millis", lifecycleStats.maxRetentionMillis); - builder.field("average_millis", lifecycleStats.averageRetentionMillis); - builder.endObject(); + lifecycleStats.toXContent(builder, params); } } @@ -87,33 +85,53 @@ public boolean equals(Object obj) { return available == other.available && enabled == other.enabled && Objects.equals(lifecycleStats, other.lifecycleStats); } - public static class LifecycleStats implements Writeable { - - public static final LifecycleStats INITIAL = new LifecycleStats(0, 0, 0, 0, true); + public static class LifecycleStats implements Writeable, ToXContentFragment { + public static final LifecycleStats INITIAL = new LifecycleStats(0, true, RetentionStats.NO_DATA, RetentionStats.NO_DATA, Map.of()); + public static final String DEFAULT_RETENTION_FIELD_NAME = "default"; + public static final String MAX_RETENTION_FIELD_NAME = "max"; final long dataStreamsWithLifecyclesCount; - final long minRetentionMillis; - final long maxRetentionMillis; - final double 
averageRetentionMillis; final boolean defaultRolloverUsed; + final RetentionStats dataRetentionStats; + final RetentionStats effectiveRetentionStats; + final Map globalRetentionStats; public LifecycleStats( long dataStreamsWithLifecyclesCount, - long minRetention, - long maxRetention, - double averageRetention, - boolean defaultRolloverUsed + boolean defaultRolloverUsed, + RetentionStats dataRetentionStats, + RetentionStats effectiveRetentionStats, + Map globalRetentionStats ) { this.dataStreamsWithLifecyclesCount = dataStreamsWithLifecyclesCount; - this.minRetentionMillis = minRetention; - this.maxRetentionMillis = maxRetention; - this.averageRetentionMillis = averageRetention; this.defaultRolloverUsed = defaultRolloverUsed; + this.dataRetentionStats = dataRetentionStats; + this.effectiveRetentionStats = effectiveRetentionStats; + this.globalRetentionStats = globalRetentionStats; } public static LifecycleStats read(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - return new LifecycleStats(in.readVLong(), in.readVLong(), in.readVLong(), in.readDouble(), in.readBoolean()); + if (in.getTransportVersion().onOrAfter(TransportVersions.GLOBAL_RETENTION_TELEMETRY)) { + return new LifecycleStats( + in.readVLong(), + in.readBoolean(), + RetentionStats.read(in), + RetentionStats.read(in), + in.readMap(GlobalRetentionStats::new) + ); + } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { + var dataStreamsWithLifecyclesCount = in.readVLong(); + var minDataRetention = in.readVLong(); + var maxDataRetention = in.readVLong(); + var avgDataRetention = in.readDouble(); + var defaultRolledOverUsed = in.readBoolean(); + return new LifecycleStats( + dataStreamsWithLifecyclesCount, + defaultRolledOverUsed, + new RetentionStats(dataStreamsWithLifecyclesCount, avgDataRetention, minDataRetention, maxDataRetention), + RetentionStats.NO_DATA, + Map.of() + ); } else { return INITIAL; } @@ -121,11 +139,17 @@ 
public static LifecycleStats read(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.GLOBAL_RETENTION_TELEMETRY)) { + out.writeVLong(dataStreamsWithLifecyclesCount); + out.writeBoolean(defaultRolloverUsed); + dataRetentionStats.writeTo(out); + effectiveRetentionStats.writeTo(out); + out.writeMap(globalRetentionStats, (o, v) -> v.writeTo(o)); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeVLong(dataStreamsWithLifecyclesCount); - out.writeVLong(minRetentionMillis); - out.writeVLong(maxRetentionMillis); - out.writeDouble(averageRetentionMillis); + out.writeVLong(dataRetentionStats.minMillis() == null ? 0 : dataRetentionStats.minMillis()); + out.writeVLong(dataRetentionStats.maxMillis() == null ? 0 : dataRetentionStats.maxMillis()); + out.writeDouble(dataRetentionStats.avgMillis() == null ? 
0 : dataRetentionStats.avgMillis()); out.writeBoolean(defaultRolloverUsed); } } @@ -134,10 +158,10 @@ public void writeTo(StreamOutput out) throws IOException { public int hashCode() { return Objects.hash( dataStreamsWithLifecyclesCount, - minRetentionMillis, - maxRetentionMillis, - averageRetentionMillis, - defaultRolloverUsed + defaultRolloverUsed, + dataRetentionStats, + effectiveRetentionStats, + globalRetentionStats ); } @@ -148,10 +172,101 @@ public boolean equals(Object obj) { } LifecycleStats other = (LifecycleStats) obj; return dataStreamsWithLifecyclesCount == other.dataStreamsWithLifecyclesCount - && minRetentionMillis == other.minRetentionMillis - && maxRetentionMillis == other.maxRetentionMillis - && averageRetentionMillis == other.averageRetentionMillis - && defaultRolloverUsed == other.defaultRolloverUsed; + && defaultRolloverUsed == other.defaultRolloverUsed + && Objects.equals(dataRetentionStats, other.dataRetentionStats) + && Objects.equals(effectiveRetentionStats, other.effectiveRetentionStats) + && Objects.equals(globalRetentionStats, other.globalRetentionStats); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("count", dataStreamsWithLifecyclesCount); + builder.field("default_rollover_used", defaultRolloverUsed); + + builder.startObject("data_retention"); + builder.field("configured_data_streams", dataRetentionStats.dataStreamCount()); + if (dataRetentionStats.dataStreamCount() > 0) { + builder.field("minimum_millis", dataRetentionStats.minMillis); + builder.field("maximum_millis", dataRetentionStats.maxMillis); + builder.field("average_millis", dataRetentionStats.avgMillis); + } + builder.endObject(); + + builder.startObject("effective_retention"); + builder.field("retained_data_streams", effectiveRetentionStats.dataStreamCount()); + if (effectiveRetentionStats.dataStreamCount() > 0) { + builder.field("minimum_millis", effectiveRetentionStats.minMillis); + 
builder.field("maximum_millis", effectiveRetentionStats.maxMillis); + builder.field("average_millis", effectiveRetentionStats.avgMillis); + } + builder.endObject(); + + builder.startObject("global_retention"); + globalRetentionStatsToXContent(builder, params, LifecycleStats.DEFAULT_RETENTION_FIELD_NAME); + globalRetentionStatsToXContent(builder, params, LifecycleStats.MAX_RETENTION_FIELD_NAME); + builder.endObject(); + return builder; + } + + private void globalRetentionStatsToXContent(XContentBuilder builder, Params params, String retentionType) throws IOException { + builder.startObject(retentionType); + GlobalRetentionStats stats = globalRetentionStats.get(retentionType); + builder.field("defined", stats != null); + if (stats != null) { + builder.field("affected_data_streams", stats.dataStreamCount()); + builder.field("retention_millis", stats.retention()); + } + builder.endObject(); + } + } + + public record RetentionStats(long dataStreamCount, Double avgMillis, Long minMillis, Long maxMillis) implements Writeable { + + static final RetentionStats NO_DATA = new RetentionStats(0, null, null, null); + + public static RetentionStats create(LongSummaryStatistics statistics) { + if (statistics.getCount() == 0) { + return NO_DATA; + } + return new RetentionStats(statistics.getCount(), statistics.getAverage(), statistics.getMin(), statistics.getMax()); + } + + public static RetentionStats read(StreamInput in) throws IOException { + long dataStreamCount = in.readVLong(); + if (dataStreamCount == 0) { + return NO_DATA; + } + double avgMillis = in.readDouble(); + long minMillis = in.readVLong(); + long maxMillis = in.readVLong(); + return new RetentionStats(dataStreamCount, avgMillis, minMillis, maxMillis); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(dataStreamCount); + if (dataStreamCount > 0) { + out.writeDouble(avgMillis); + out.writeVLong(minMillis); + out.writeVLong(maxMillis); + } + } + } + + public record 
GlobalRetentionStats(long dataStreamCount, long retention) implements Writeable { + + public GlobalRetentionStats(long dataStreamCount, TimeValue retention) { + this(dataStreamCount, retention.getMillis()); + } + + public GlobalRetentionStats(StreamInput in) throws IOException { + this(in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(dataStreamCount); + out.writeVLong(retention); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java index bfbc32e11e93d..b179195e87770 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java @@ -90,8 +90,13 @@ public void performAction( void performDownsampleIndex(String indexName, String downsampleIndexName, ActionListener listener) { DownsampleConfig config = new DownsampleConfig(fixedInterval); - DownsampleAction.Request request = new DownsampleAction.Request(indexName, downsampleIndexName, waitTimeout, config) - .masterNodeTimeout(TimeValue.MAX_VALUE); + DownsampleAction.Request request = new DownsampleAction.Request( + TimeValue.MAX_VALUE, + indexName, + downsampleIndexName, + waitTimeout, + config + ); // Currently, DownsampleAction always acknowledges action was complete when no exceptions are thrown. 
getClient().execute(DownsampleAction.INSTANCE, request, listener.delegateFailureAndWrap((l, response) -> l.onResponse(null))); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index 19c4ad17ca4d9..ca57dbec5bef5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.rest.action; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -23,6 +24,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; @@ -59,4 +61,9 @@ public RestResponse buildResponse(XPackUsageResponse response, XContentBuilder b } ); } + + @Override + public Set supportedCapabilities() { + return Sets.union(super.supportedCapabilities(), Set.of("global_retention_telemetry")); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 6177329089bd3..89733761f3dc0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -420,7 +420,19 @@ static RoleDescriptor 
kibanaSystem(String name) { // For source indices of the Cloud Detection & Response (CDR) packages that ships a // transform RoleDescriptor.IndicesPrivileges.builder() - .indices("logs-wiz.vulnerability-*", "logs-wiz.cloud_configuration_finding-*", "logs-aws.securityhub_findings-*") + .indices( + "logs-wiz.vulnerability-*", + "logs-wiz.cloud_configuration_finding-*", + "logs-google_scc.finding-*", + "logs-aws.securityhub_findings-*", + "logs-aws.inspector-*", + "logs-amazon_security_lake.findings-*", + "logs-qualys_vmdr.asset_host_detection-*", + "logs-tenable_sc.vulnerability-*", + "logs-tenable_io.vulnerability-*", + "logs-rapid7_insightvm.vulnerability-*", + "logs-carbon_black_cloud.asset_vulnerability_summary-*" + ) .privileges("read", "view_index_metadata") .build(), // For alias indices of the Cloud Detection & Response (CDR) packages that ships a diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java index 66ab5755f9392..3dce786c700dd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsageTests.java @@ -7,21 +7,25 @@ package org.elasticsearch.xpack.core.datastreams; +import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; +import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.test.ESTestCase; +import java.util.HashMap; import java.util.List; -import java.util.LongSummaryStatistics; +import java.util.Map; import java.util.UUID; import static org.elasticsearch.xpack.core.action.DataStreamLifecycleUsageTransportAction.calculateStats; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public class DataStreamLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { @@ -32,68 +36,69 @@ protected DataStreamLifecycleFeatureSetUsage createTestInstance() { ? new DataStreamLifecycleFeatureSetUsage( new DataStreamLifecycleFeatureSetUsage.LifecycleStats( randomNonNegativeLong(), - randomNonNegativeLong(), - randomNonNegativeLong(), - randomDouble(), - randomBoolean() + randomBoolean(), + generateRetentionStats(), + generateRetentionStats(), + randomBoolean() ? Map.of() : Map.of("default", generateGlobalRetention(), "max", generateGlobalRetention()) ) ) : DataStreamLifecycleFeatureSetUsage.DISABLED; } + private static DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats generateGlobalRetention() { + return new DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats(randomNonNegativeLong(), randomNonNegativeLong()); + } + + private static DataStreamLifecycleFeatureSetUsage.RetentionStats generateRetentionStats() { + return randomBoolean() + ? 
DataStreamLifecycleFeatureSetUsage.RetentionStats.NO_DATA + : new DataStreamLifecycleFeatureSetUsage.RetentionStats( + randomNonNegativeLong(), + randomDoubleBetween(0.0, 110.0, false), + randomNonNegativeLong(), + randomNonNegativeLong() + ); + } + @Override protected DataStreamLifecycleFeatureSetUsage mutateInstance(DataStreamLifecycleFeatureSetUsage instance) { if (instance.equals(DataStreamLifecycleFeatureSetUsage.DISABLED)) { return new DataStreamLifecycleFeatureSetUsage(DataStreamLifecycleFeatureSetUsage.LifecycleStats.INITIAL); } - return switch (randomInt(4)) { - case 0 -> new DataStreamLifecycleFeatureSetUsage( - new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - randomValueOtherThan(instance.lifecycleStats.dataStreamsWithLifecyclesCount, ESTestCase::randomLong), - instance.lifecycleStats.minRetentionMillis, - instance.lifecycleStats.maxRetentionMillis, - instance.lifecycleStats.averageRetentionMillis, - instance.lifecycleStats.defaultRolloverUsed - ) + var count = instance.lifecycleStats.dataStreamsWithLifecyclesCount; + var defaultRollover = instance.lifecycleStats.defaultRolloverUsed; + var dataRetentionStats = instance.lifecycleStats.dataRetentionStats; + var effectiveRetentionStats = instance.lifecycleStats.effectiveRetentionStats; + var maxRetention = instance.lifecycleStats.globalRetentionStats.get("max"); + var defaultRetention = instance.lifecycleStats.globalRetentionStats.get("default"); + switch (randomInt(5)) { + case 0 -> count += (count > 0 ? 
-1 : 1); + case 1 -> defaultRollover = defaultRollover == false; + case 2 -> dataRetentionStats = randomValueOtherThan( + dataRetentionStats, + DataStreamLifecycleFeatureSetUsageTests::generateRetentionStats ); - case 1 -> new DataStreamLifecycleFeatureSetUsage( - new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - instance.lifecycleStats.dataStreamsWithLifecyclesCount, - randomValueOtherThan(instance.lifecycleStats.minRetentionMillis, ESTestCase::randomLong), - instance.lifecycleStats.maxRetentionMillis, - instance.lifecycleStats.averageRetentionMillis, - instance.lifecycleStats.defaultRolloverUsed - ) - ); - case 2 -> new DataStreamLifecycleFeatureSetUsage( - new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - instance.lifecycleStats.dataStreamsWithLifecyclesCount, - instance.lifecycleStats.minRetentionMillis, - randomValueOtherThan(instance.lifecycleStats.maxRetentionMillis, ESTestCase::randomLong), - instance.lifecycleStats.averageRetentionMillis, - instance.lifecycleStats.defaultRolloverUsed - ) - ); - case 3 -> new DataStreamLifecycleFeatureSetUsage( - new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - instance.lifecycleStats.dataStreamsWithLifecyclesCount, - instance.lifecycleStats.minRetentionMillis, - instance.lifecycleStats.maxRetentionMillis, - randomValueOtherThan(instance.lifecycleStats.averageRetentionMillis, ESTestCase::randomDouble), - instance.lifecycleStats.defaultRolloverUsed - ) + case 3 -> effectiveRetentionStats = randomValueOtherThan( + effectiveRetentionStats, + DataStreamLifecycleFeatureSetUsageTests::generateRetentionStats ); - case 4 -> new DataStreamLifecycleFeatureSetUsage( - new DataStreamLifecycleFeatureSetUsage.LifecycleStats( - instance.lifecycleStats.dataStreamsWithLifecyclesCount, - instance.lifecycleStats.minRetentionMillis, - instance.lifecycleStats.maxRetentionMillis, - instance.lifecycleStats.averageRetentionMillis, - instance.lifecycleStats.defaultRolloverUsed == false - ) + case 4 -> maxRetention = 
randomValueOtherThan(maxRetention, DataStreamLifecycleFeatureSetUsageTests::generateGlobalRetention); + case 5 -> defaultRetention = randomValueOtherThan( + defaultRetention, + DataStreamLifecycleFeatureSetUsageTests::generateGlobalRetention ); default -> throw new RuntimeException("unreachable"); - }; + } + Map map = new HashMap<>(); + if (defaultRetention != null) { + map.put("default", defaultRetention); + } + if (maxRetention != null) { + map.put("max", maxRetention); + } + return new DataStreamLifecycleFeatureSetUsage( + new DataStreamLifecycleFeatureSetUsage.LifecycleStats(count, defaultRollover, dataRetentionStats, effectiveRetentionStats, map) + ); } public void testLifecycleStats() { @@ -112,7 +117,7 @@ public void testLifecycleStats() { 1L, null, false, - new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(1000)), null, true) + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueSeconds(50)), null, true) ), DataStreamTestHelper.newInstance( randomAlphaOfLength(10), @@ -120,7 +125,7 @@ public void testLifecycleStats() { 1L, null, false, - new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(100)), null, true) + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(150)), null, true) ), DataStreamTestHelper.newInstance( randomAlphaOfLength(10), @@ -128,7 +133,7 @@ public void testLifecycleStats() { 1L, null, false, - new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueMillis(5000)), null, false) + new DataStreamLifecycle(new DataStreamLifecycle.Retention(TimeValue.timeValueSeconds(5)), null, false) ), DataStreamTestHelper.newInstance( randomAlphaOfLength(10), @@ -140,15 +145,70 @@ public void testLifecycleStats() { ) ); - Tuple stats = calculateStats(dataStreams); - // 3 data streams with an enabled lifecycle - assertThat(stats.v1(), is(3L)); - LongSummaryStatistics longSummaryStatistics = stats.v2(); - 
assertThat(longSummaryStatistics.getMax(), is(1000L)); - assertThat(longSummaryStatistics.getMin(), is(100L)); - // only counting the ones with an effective retention in the summary statistics - assertThat(longSummaryStatistics.getCount(), is(2L)); - assertThat(longSummaryStatistics.getAverage(), is(550.0)); + // Test empty global retention + { + boolean useDefault = randomBoolean(); + RolloverConfiguration rollover = useDefault + ? DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getDefault(Settings.EMPTY) + : new RolloverConfiguration(new RolloverConditions()); + DataStreamLifecycleFeatureSetUsage.LifecycleStats stats = calculateStats(dataStreams, rollover, null); + + assertThat(stats.dataStreamsWithLifecyclesCount, is(3L)); + assertThat(stats.defaultRolloverUsed, is(useDefault)); + // Data retention + assertThat(stats.dataRetentionStats.dataStreamCount(), is(2L)); + assertThat(stats.dataRetentionStats.maxMillis(), is(50_000L)); + assertThat(stats.dataRetentionStats.minMillis(), is(150L)); + assertThat(stats.dataRetentionStats.avgMillis(), is(25_075.0)); + + assertThat(stats.effectiveRetentionStats.dataStreamCount(), is(2L)); + assertThat(stats.effectiveRetentionStats.maxMillis(), is(50_000L)); + assertThat(stats.effectiveRetentionStats.minMillis(), is(150L)); + assertThat(stats.effectiveRetentionStats.avgMillis(), is(25_075.0)); + + assertThat(stats.globalRetentionStats, equalTo(Map.of())); + } + + // Test with global retention + { + boolean useDefault = randomBoolean(); + RolloverConfiguration rollover = useDefault + ? 
DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING.getDefault(Settings.EMPTY) + : new RolloverConfiguration(new RolloverConditions()); + TimeValue defaultRetention = TimeValue.timeValueSeconds(10); + TimeValue maxRetention = TimeValue.timeValueSeconds(20); + DataStreamLifecycleFeatureSetUsage.LifecycleStats stats = calculateStats( + dataStreams, + rollover, + new DataStreamGlobalRetention(defaultRetention, maxRetention) + ); + + assertThat(stats.dataStreamsWithLifecyclesCount, is(3L)); + assertThat(stats.defaultRolloverUsed, is(useDefault)); + // Data retention + assertThat(stats.dataRetentionStats.dataStreamCount(), is(2L)); + assertThat(stats.dataRetentionStats.maxMillis(), is(50_000L)); + assertThat(stats.dataRetentionStats.minMillis(), is(150L)); + assertThat(stats.dataRetentionStats.avgMillis(), is(25_075.0)); + + // Effective retention + assertThat(stats.effectiveRetentionStats.dataStreamCount(), is(3L)); + assertThat(stats.effectiveRetentionStats.maxMillis(), is(20_000L)); + assertThat(stats.effectiveRetentionStats.minMillis(), is(150L)); + assertThat(stats.effectiveRetentionStats.avgMillis(), is(10_050.0)); + + assertThat( + stats.globalRetentionStats, + equalTo( + Map.of( + "default", + new DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats(1L, 10_000L), + "max", + new DataStreamLifecycleFeatureSetUsage.GlobalRetentionStats(1L, 20_000L) + ) + ) + ); + } } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java index 1fb7b7c36827e..f988a6fd5769c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core.ilm; -import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -388,11 +387,7 @@ public void testPerformActionAttrsNoShard() { public void testPerformActionSomeShardsOnlyOnNewNodes() throws Exception { VersionInformation oldVersion = new VersionInformation( - VersionUtils.randomVersionBetween( - random(), - Version.fromId(Version.CURRENT.major * 1_000_000 + 99), - VersionUtils.getPreviousVersion() - ), + VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion()), IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.randomCompatibleVersion(random()) ); @@ -457,11 +452,7 @@ public void testPerformActionSomeShardsOnlyOnNewNodes() throws Exception { public void testPerformActionSomeShardsOnlyOnNewNodesButNewNodesInvalidAttrs() { VersionInformation oldVersion = new VersionInformation( - VersionUtils.randomVersionBetween( - random(), - Version.fromId(Version.CURRENT.major * 1_000_000 + 99), - VersionUtils.getPreviousVersion() - ), + VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion()), IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.randomCompatibleVersion(random()) ); @@ -534,11 +525,7 @@ public void testPerformActionSomeShardsOnlyOnNewNodesButNewNodesInvalidAttrs() { public void testPerformActionNewShardsExistButWithInvalidAttributes() throws Exception { VersionInformation oldVersion = new VersionInformation( - VersionUtils.randomVersionBetween( - random(), - Version.fromId(Version.CURRENT.major * 1_000_000 + 99), - VersionUtils.getPreviousVersion() - ), + VersionUtils.randomCompatibleVersion(random(), VersionUtils.getPreviousVersion()), IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.randomCompatibleVersion(random()) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 54a5678579ce4..a476bbfb229fb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1612,7 +1612,15 @@ public void testKibanaSystemRole() { Arrays.asList( "logs-wiz.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13)), "logs-wiz.cloud_configuration_finding-" + randomAlphaOfLength(randomIntBetween(0, 13)), - "logs-aws.securityhub_findings-" + randomAlphaOfLength(randomIntBetween(0, 13)) + "logs-google_scc.finding-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-aws.securityhub_findings-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-aws.inspector-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-amazon_security_lake.findings-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-qualys_vmdr.asset_host_detection-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-tenable_sc.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-tenable_io.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-rapid7_insightvm.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-carbon_black_cloud.asset_vulnerability_summary-" + randomAlphaOfLength(randomIntBetween(0, 13)) ).forEach(indexName -> { final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 62f89f650dec2..18872d00d54a0 100644 --- 
a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -160,7 +160,7 @@ public void testCamelCaseDeprecation() throws IOException { + "} }"; IndexMetadata simpleIndex = IndexMetadata.builder(randomAlphaOfLengthBetween(5, 10)) - .settings(settings(IndexVersions.V_7_0_0)) + .settings(settings(IndexVersions.MINIMUM_COMPATIBLE)) .numberOfShards(1) .numberOfReplicas(1) .putMapping(simpleMapping) diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index 603d69d695cac..ba5ac7b0c7317 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -29,14 +29,10 @@ artifacts { tasks.named('yamlRestTest') { usesDefaultDistribution() } -tasks.named('yamlRestTestV7CompatTest') { +tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("rollup/10_basic/Rollup index", "Downsample for TSDB changed the configuration") -} diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java index d94d609cf3470..365f31f8e5fe1 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureIT.java @@ -267,6 +267,7 @@ public void testNoDisruption() { // GIVEN final 
DownsampleAction.Request downsampleRequest = new DownsampleAction.Request( + TEST_REQUEST_TIMEOUT, SOURCE_INDEX_NAME, TARGET_INDEX_NAME, WAIT_TIMEOUT, @@ -294,6 +295,7 @@ public void testDownsampleActionExceptionDisruption() { // GIVEN final MockTransportService coordinator = MockTransportService.getInstance(testCluster.coordinator); final DownsampleAction.Request downsampleRequest = new DownsampleAction.Request( + TEST_REQUEST_TIMEOUT, SOURCE_INDEX_NAME, TARGET_INDEX_NAME, WAIT_TIMEOUT, diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java index 8324265c3a786..eb8dfe72850a2 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -40,6 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient config = DownsampleConfig.fromXContent(parser); } DownsampleAction.Request request = new DownsampleAction.Request( + RestUtils.getMasterNodeTimeout(restRequest), sourceIndex, targetIndex, TimeValue.parseTimeValue(timeout, null, "wait_timeout"), diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 6a615d648a850..812b48ee4cae5 100644 --- 
a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -594,7 +594,7 @@ public void onFailure(Exception e) { }; client().execute( DownsampleAction.INSTANCE, - new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config), + new DownsampleAction.Request(TEST_REQUEST_TIMEOUT, sourceIndex, downsampleIndex, TIMEOUT, config), downsampleListener ); assertBusy(() -> { @@ -607,7 +607,10 @@ public void onFailure(Exception e) { assertBusy(() -> { try { - client().execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)); + client().execute( + DownsampleAction.INSTANCE, + new DownsampleAction.Request(TEST_REQUEST_TIMEOUT, sourceIndex, downsampleIndex, TIMEOUT, config) + ); } catch (ElasticsearchException e) { fail("transient failure due to overlapping downsample operations"); } @@ -1145,7 +1148,10 @@ private void prepareSourceIndex(final String sourceIndex, boolean blockWrite) { private void downsample(String sourceIndex, String downsampleIndex, DownsampleConfig config) { assertAcked( - client().execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)) + client().execute( + DownsampleAction.INSTANCE, + new DownsampleAction.Request(TEST_REQUEST_TIMEOUT, sourceIndex, downsampleIndex, TIMEOUT, config) + ) ); } diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java index 834a1e887caa7..c705b3c6a98d3 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java +++ 
b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java @@ -98,6 +98,7 @@ public void testDataStreamDownsample() throws ExecutionException, InterruptedExc // WHEN (simulate downsampling as done by an ILM action) final String downsampleTargetIndex = DataStream.BACKING_INDEX_PREFIX + dataStreamName + "-downsample-1h"; final DownsampleAction.Request downsampleRequest = new DownsampleAction.Request( + TEST_REQUEST_TIMEOUT, rolloverResponse.getOldIndex(), downsampleTargetIndex, TIMEOUT, diff --git a/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle b/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle index b0f1e8bd026b0..eb14d6fe46958 100644 --- a/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle @@ -6,8 +6,6 @@ * Side Public License, v 1. */ -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -21,9 +19,6 @@ dependencies { javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"), "javaRestTest")) } -assert Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() == 8: - "If we are targeting a branch other than 8, we should enable migration tests" - BuildParams.bwcVersions.withWireCompatible(v -> v.after("8.8.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) diff --git a/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java b/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java index e01b8de941851..5e3fd5eb6d112 100644 --- 
a/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java +++ b/x-pack/plugin/ent-search/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/application/FullClusterRestartIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ObjectPath; @@ -25,6 +26,7 @@ import static org.elasticsearch.Version.V_8_12_0; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +@UpdateForV9 // Investigate what needs to be added in terms of 9.0 migration testing public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { // DSL was introduced with version 8.12.0 of ES. private static final Version DSL_DEFAULT_RETENTION_VERSION = V_8_12_0; diff --git a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EsEQLCorrectnessIT.java b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EsEQLCorrectnessIT.java index 1d5ed1ffdcbab..478e2f93db25f 100644 --- a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EsEQLCorrectnessIT.java +++ b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EsEQLCorrectnessIT.java @@ -13,6 +13,7 @@ import org.apache.http.HttpHost; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.apache.lucene.tests.util.TimeUnits; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import org.elasticsearch.client.Request; @@ -45,6 +46,7 @@ @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) @TestLogging(value 
= "org.elasticsearch.xpack.eql.EsEQLCorrectnessIT:INFO", reason = "Log query execution time") +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/112572") public class EsEQLCorrectnessIT extends ESRestTestCase { private static final String PARAM_FORMATTING = "%1$s"; diff --git a/x-pack/plugin/eql/qa/rest/build.gradle b/x-pack/plugin/eql/qa/rest/build.gradle index d035005758a54..d5b0cc42091f3 100644 --- a/x-pack/plugin/eql/qa/rest/build.gradle +++ b/x-pack/plugin/eql/qa/rest/build.gradle @@ -26,18 +26,10 @@ tasks.named('javaRestTest') { tasks.named('yamlRestTest') { usesDefaultDistribution() } -tasks.named('yamlRestTestV7CompatTest') { +tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.skipTest("eql/10_basic/Execute EQL events query with wildcard (*) fields filtering.", "Change of locale with Java 23 makes these tests non deterministic") - task.skipTest("eql/10_basic/Execute EQL sequence with fields filtering.", "Change of locale with Java 23 makes these tests non deterministic") - task.skipTest("eql/10_basic/Execute EQL sequence with custom format for timestamp field.", "Change of locale with Java 23 makes these tests non deterministic") - task.skipTest("eql/10_basic/Execute EQL events query with fields filtering", "Change of locale with Java 23 makes these tests non deterministic") - task.skipTest("eql/10_basic/Execute EQL sequence with wildcard (*) fields filtering.", "Change of locale with Java 23 makes these tests non deterministic") -} - if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } diff --git a/x-pack/plugin/esql/qa/testFixtures/build.gradle b/x-pack/plugin/esql/qa/testFixtures/build.gradle index e8a95011100f5..b6ed610406631 100644 --- a/x-pack/plugin/esql/qa/testFixtures/build.gradle +++ 
b/x-pack/plugin/esql/qa/testFixtures/build.gradle @@ -2,16 +2,18 @@ apply plugin: 'elasticsearch.java' apply plugin: org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin dependencies { - implementation project(':x-pack:plugin:esql:compute') - implementation project(':x-pack:plugin:esql') - compileOnly project(path: xpackModule('core')) - implementation project(":libs:elasticsearch-x-content") - implementation project(':client:rest') - implementation project(':libs:elasticsearch-logging') - implementation project(':test:framework') - api(testArtifact(project(xpackModule('esql-core')))) - implementation project(':server') - implementation "net.sf.supercsv:super-csv:${versions.supercsv}" + implementation project(':x-pack:plugin:esql:compute') + implementation project(':x-pack:plugin:esql') + compileOnly project(path: xpackModule('core')) + implementation project(":libs:elasticsearch-x-content") + implementation project(':client:rest') + implementation project(':libs:elasticsearch-logging') + implementation project(':test:framework') + api(testArtifact(project(xpackModule('esql-core')))) + implementation project(':server') + implementation "net.sf.supercsv:super-csv:${versions.supercsv}" + implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" } /** diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index c934a8926ee7e..70a054f233a3c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -118,7 +118,7 @@ public static Tuple skipVersionRange(String testName, String i return null; } - public static Tuple> loadPageFromCsv(URL source) throws Exception 
{ + public static Tuple> loadPageFromCsv(URL source, Map typeMapping) throws Exception { record CsvColumn(String name, Type type, BuilderWrapper builderWrapper) implements Releasable { void append(String stringValue) { @@ -164,21 +164,16 @@ public void close() { if (columns == null) { columns = new CsvColumn[entries.length]; for (int i = 0; i < entries.length; i++) { - int split = entries[i].indexOf(':'); - String name, typeName; + String[] header = entries[i].split(":"); + String name = header[0].trim(); + String typeName = (typeMapping != null && typeMapping.containsKey(name)) ? typeMapping.get(name) + : header.length > 1 ? header[1].trim() + : null; - if (split < 0) { + if (typeName == null || typeName.isEmpty()) { throw new IllegalArgumentException( "A type is always expected in the schema definition; found " + entries[i] ); - } else { - name = entries[i].substring(0, split).trim(); - typeName = entries[i].substring(split + 1).trim(); - if (typeName.length() == 0) { - throw new IllegalArgumentException( - "A type is always expected in the schema definition; found " + entries[i] - ); - } } Type type = Type.asType(typeName); if (type == null) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 9ee22113a4244..068adf190653a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -6,6 +6,10 @@ */ package org.elasticsearch.xpack.esql; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; + import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.auth.AuthScope; @@ -17,20 +21,13 @@ import 
org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContent; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.BufferedReader; @@ -51,66 +48,43 @@ public class CsvTestsDataLoader { private static final int BULK_DATA_SIZE = 100_000; - private static final TestsDataset EMPLOYEES = new TestsDataset("employees", "mapping-default.json", "employees.csv", null, false); - private static final TestsDataset HOSTS = new TestsDataset("hosts", "mapping-hosts.json", "hosts.csv"); - private static final TestsDataset APPS = new TestsDataset("apps", "mapping-apps.json", "apps.csv"); - private static final TestsDataset LANGUAGES = new TestsDataset("languages", "mapping-languages.json", "languages.csv"); - private static final TestsDataset ALERTS = new TestsDataset("alerts", "mapping-alerts.json", "alerts.csv"); - private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs", "mapping-ul_logs.json", "ul_logs.csv"); - private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data", "mapping-sample_data.json", "sample_data.csv"); - private static final TestsDataset SAMPLE_DATA_STR = new TestsDataset( - "sample_data_str", - "mapping-sample_data_str.json", - 
"sample_data_str.csv" - ); - private static final TestsDataset SAMPLE_DATA_TS_LONG = new TestsDataset( - "sample_data_ts_long", - "mapping-sample_data_ts_long.json", - "sample_data_ts_long.csv" - ); - private static final TestsDataset MISSING_IP_SAMPLE_DATA = new TestsDataset( - "missing_ip_sample_data", - "mapping-missing_ip_sample_data.json", - "missing_ip_sample_data.csv" - ); - private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips", "mapping-clientips.json", "clientips.csv"); - private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr", "mapping-client_cidr.json", "client_cidr.csv"); - private static final TestsDataset AGES = new TestsDataset("ages", "mapping-ages.json", "ages.csv"); - private static final TestsDataset HEIGHTS = new TestsDataset("heights", "mapping-heights.json", "heights.csv"); - private static final TestsDataset DECADES = new TestsDataset("decades", "mapping-decades.json", "decades.csv"); - private static final TestsDataset AIRPORTS = new TestsDataset("airports", "mapping-airports.json", "airports.csv"); - private static final TestsDataset AIRPORTS_MP = new TestsDataset("airports_mp", "mapping-airports.json", "airports_mp.csv"); - private static final TestsDataset AIRPORTS_WEB = new TestsDataset("airports_web", "mapping-airports_web.json", "airports_web.csv"); - private static final TestsDataset DATE_NANOS = new TestsDataset("date_nanos", "mapping-date_nanos.json", "date_nanos.csv"); - private static final TestsDataset COUNTRIES_BBOX = new TestsDataset( - "countries_bbox", - "mapping-countries_bbox.json", - "countries_bbox.csv" - ); - private static final TestsDataset COUNTRIES_BBOX_WEB = new TestsDataset( - "countries_bbox_web", - "mapping-countries_bbox_web.json", - "countries_bbox_web.csv" - ); - private static final TestsDataset AIRPORT_CITY_BOUNDARIES = new TestsDataset( - "airport_city_boundaries", - "mapping-airport_city_boundaries.json", - "airport_city_boundaries.csv" - ); - private static 
final TestsDataset CARTESIAN_MULTIPOLYGONS = new TestsDataset( - "cartesian_multipolygons", - "mapping-cartesian_multipolygons.json", - "cartesian_multipolygons.csv" - ); - private static final TestsDataset DISTANCES = new TestsDataset("distances", "mapping-distances.json", "distances.csv"); - private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv", "k8s-settings.json", true); - private static final TestsDataset ADDRESSES = new TestsDataset("addresses", "mapping-addresses.json", "addresses.csv", null, true); - private static final TestsDataset BOOKS = new TestsDataset("books", "mapping-books.json", "books.csv", null, true); + private static final TestsDataset EMPLOYEES = new TestsDataset("employees", "mapping-default.json", "employees.csv").noSubfields(); + private static final TestsDataset HOSTS = new TestsDataset("hosts"); + private static final TestsDataset APPS = new TestsDataset("apps"); + private static final TestsDataset APPS_SHORT = APPS.withIndex("apps_short").withTypeMapping(Map.of("id", "short")); + private static final TestsDataset LANGUAGES = new TestsDataset("languages"); + private static final TestsDataset ALERTS = new TestsDataset("alerts"); + private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs"); + private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data"); + private static final TestsDataset SAMPLE_DATA_STR = SAMPLE_DATA.withIndex("sample_data_str") + .withTypeMapping(Map.of("client_ip", "keyword")); + private static final TestsDataset SAMPLE_DATA_TS_LONG = SAMPLE_DATA.withIndex("sample_data_ts_long") + .withData("sample_data_ts_long.csv") + .withTypeMapping(Map.of("@timestamp", "long")); + private static final TestsDataset MISSING_IP_SAMPLE_DATA = new TestsDataset("missing_ip_sample_data"); + private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips"); + private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr"); + private static 
final TestsDataset AGES = new TestsDataset("ages"); + private static final TestsDataset HEIGHTS = new TestsDataset("heights"); + private static final TestsDataset DECADES = new TestsDataset("decades"); + private static final TestsDataset AIRPORTS = new TestsDataset("airports"); + private static final TestsDataset AIRPORTS_MP = AIRPORTS.withIndex("airports_mp").withData("airports_mp.csv"); + private static final TestsDataset AIRPORTS_WEB = new TestsDataset("airports_web"); + private static final TestsDataset DATE_NANOS = new TestsDataset("date_nanos"); + private static final TestsDataset COUNTRIES_BBOX = new TestsDataset("countries_bbox"); + private static final TestsDataset COUNTRIES_BBOX_WEB = new TestsDataset("countries_bbox_web"); + private static final TestsDataset AIRPORT_CITY_BOUNDARIES = new TestsDataset("airport_city_boundaries"); + private static final TestsDataset CARTESIAN_MULTIPOLYGONS = new TestsDataset("cartesian_multipolygons"); + private static final TestsDataset DISTANCES = new TestsDataset("distances"); + private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv").withSetting("k8s-settings.json"); + private static final TestsDataset ADDRESSES = new TestsDataset("addresses"); + private static final TestsDataset BOOKS = new TestsDataset("books"); public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), Map.entry(HOSTS.indexName, HOSTS), Map.entry(APPS.indexName, APPS), + Map.entry(APPS_SHORT.indexName, APPS_SHORT), Map.entry(LANGUAGES.indexName, LANGUAGES), Map.entry(UL_LOGS.indexName, UL_LOGS), Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), @@ -258,18 +232,8 @@ public static void loadDataSetIntoEs(RestClient client, Logger logger) throws IO } private static void loadDataSetIntoEs(RestClient client, Logger logger, IndexCreator indexCreator) throws IOException { - for (var dataSet : CSV_DATASET_MAP.values()) { - final String settingName = dataSet.settingFileName != null ? 
"/" + dataSet.settingFileName : null; - load( - client, - dataSet.indexName, - "/" + dataSet.mappingFileName, - settingName, - "/" + dataSet.dataFileName, - dataSet.allowSubFields, - logger, - indexCreator - ); + for (var dataset : CSV_DATASET_MAP.values()) { + load(client, dataset, logger, indexCreator); } forceMerge(client, CSV_DATASET_MAP.keySet(), logger); for (var policy : ENRICH_POLICIES) { @@ -291,32 +255,51 @@ private static void loadEnrichPolicy(RestClient client, String policyName, Strin client.performRequest(request); } - private static void load( - RestClient client, - String indexName, - String mappingName, - String settingName, - String dataName, - boolean allowSubFields, - Logger logger, - IndexCreator indexCreator - ) throws IOException { + private static void load(RestClient client, TestsDataset dataset, Logger logger, IndexCreator indexCreator) throws IOException { + final String mappingName = "/" + dataset.mappingFileName; URL mapping = CsvTestsDataLoader.class.getResource(mappingName); if (mapping == null) { throw new IllegalArgumentException("Cannot find resource " + mappingName); } + final String dataName = "/" + dataset.dataFileName; URL data = CsvTestsDataLoader.class.getResource(dataName); if (data == null) { throw new IllegalArgumentException("Cannot find resource " + dataName); } Settings indexSettings = Settings.EMPTY; + final String settingName = dataset.settingFileName != null ? 
"/" + dataset.settingFileName : null; if (settingName != null) { indexSettings = Settings.builder() .loadFromStream(settingName, CsvTestsDataLoader.class.getResourceAsStream(settingName), false) .build(); } - indexCreator.createIndex(client, indexName, readTextFile(mapping), indexSettings); - loadCsvData(client, indexName, data, allowSubFields, CsvTestsDataLoader::createParser, logger); + indexCreator.createIndex(client, dataset.indexName, readMappingFile(mapping, dataset.typeMapping), indexSettings); + loadCsvData(client, dataset.indexName, data, dataset.allowSubFields, logger); + } + + private static String readMappingFile(URL resource, Map typeMapping) throws IOException { + String mappingJsonText = readTextFile(resource); + if (typeMapping == null || typeMapping.isEmpty()) { + return mappingJsonText; + } + boolean modified = false; + ObjectMapper mapper = new ObjectMapper(); + JsonNode mappingNode = mapper.readTree(mappingJsonText); + JsonNode propertiesNode = mappingNode.path("properties"); + + for (Map.Entry entry : typeMapping.entrySet()) { + String key = entry.getKey(); + String newType = entry.getValue(); + + if (propertiesNode.has(key)) { + modified = true; + ((ObjectNode) propertiesNode.get(key)).put("type", newType); + } + } + if (modified) { + return mapper.writerWithDefaultPrettyPrinter().writeValueAsString(mappingNode); + } + return mappingJsonText; } public static String readTextFile(URL resource) throws IOException { @@ -345,14 +328,8 @@ public static String readTextFile(URL resource) throws IOException { * - multi-values are comma separated * - commas inside multivalue fields can be escaped with \ (backslash) character */ - private static void loadCsvData( - RestClient client, - String indexName, - URL resource, - boolean allowSubFields, - CheckedBiFunction p, - Logger logger - ) throws IOException { + private static void loadCsvData(RestClient client, String indexName, URL resource, boolean allowSubFields, Logger logger) + throws IOException { 
ArrayList failures = new ArrayList<>(); StringBuilder builder = new StringBuilder(); try (BufferedReader reader = reader(resource)) { @@ -371,27 +348,17 @@ private static void loadCsvData( columns = new String[entries.length]; for (int i = 0; i < entries.length; i++) { int split = entries[i].indexOf(':'); - String name, typeName; - if (split < 0) { - throw new IllegalArgumentException( - "A type is always expected in the schema definition; found " + entries[i] - ); + columns[i] = entries[i].trim(); } else { - name = entries[i].substring(0, split).trim(); + String name = entries[i].substring(0, split).trim(); if (allowSubFields || name.contains(".") == false) { - typeName = entries[i].substring(split + 1).trim(); - if (typeName.isEmpty()) { - throw new IllegalArgumentException( - "A type is always expected in the schema definition; found " + entries[i] - ); - } + columns[i] = name; } else {// if it's a subfield, ignore it in the _bulk request - name = null; + columns[i] = null; subFieldsIndices.add(i); } } - columns[i] = name; } } // data rows @@ -534,22 +501,40 @@ private static void forceMerge(RestClient client, Set indices, Logger lo } } - private static XContentParser createParser(XContent xContent, InputStream data) throws IOException { - NamedXContentRegistry contentRegistry = new NamedXContentRegistry(ClusterModule.getNamedXWriteables()); - XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(contentRegistry) - .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); - return xContent.createParser(config, data); - } - public record TestsDataset( String indexName, String mappingFileName, String dataFileName, String settingFileName, - boolean allowSubFields + boolean allowSubFields, + Map typeMapping ) { public TestsDataset(String indexName, String mappingFileName, String dataFileName) { - this(indexName, mappingFileName, dataFileName, null, true); + this(indexName, mappingFileName, dataFileName, null, true, null); + } + + 
public TestsDataset(String indexName) { + this(indexName, "mapping-" + indexName + ".json", indexName + ".csv", null, true, null); + } + + public TestsDataset withIndex(String indexName) { + return new TestsDataset(indexName, mappingFileName, dataFileName, settingFileName, allowSubFields, typeMapping); + } + + public TestsDataset withData(String dataFileName) { + return new TestsDataset(indexName, mappingFileName, dataFileName, settingFileName, allowSubFields, typeMapping); + } + + public TestsDataset withSetting(String settingFileName) { + return new TestsDataset(indexName, mappingFileName, dataFileName, settingFileName, allowSubFields, typeMapping); + } + + public TestsDataset noSubfields() { + return new TestsDataset(indexName, mappingFileName, dataFileName, settingFileName, false, typeMapping); + } + + public TestsDataset withTypeMapping(Map typeMapping) { + return new TestsDataset(indexName, mappingFileName, dataFileName, settingFileName, allowSubFields, typeMapping); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json deleted file mode 100644 index 9e97de8c92928..0000000000000 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_str.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "properties": { - "@timestamp": { - "type": "date" - }, - "client_ip": { - "type": "keyword" - }, - "event_duration": { - "type": "long" - }, - "message": { - "type": "keyword" - } - } -} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json deleted file mode 100644 index ecf21a2a919d0..0000000000000 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-sample_data_ts_long.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "properties": { - "@timestamp": { - "type": "long" - }, - "client_ip": { - 
"type": "ip" - }, - "event_duration": { - "type": "long" - }, - "message": { - "type": "keyword" - } - } -} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv deleted file mode 100644 index bc98671adc7ff..0000000000000 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_str.csv +++ /dev/null @@ -1,8 +0,0 @@ -@timestamp:date,client_ip:keyword,event_duration:long,message:keyword -2023-10-23T13:55:01.543Z,172.21.3.15,1756467,Connected to 10.1.0.1 -2023-10-23T13:53:55.832Z,172.21.3.15,5033755,Connection error -2023-10-23T13:52:55.015Z,172.21.3.15,8268153,Connection error -2023-10-23T13:51:54.732Z,172.21.3.15,725448,Connection error -2023-10-23T13:33:34.937Z,172.21.0.5,1232382,Disconnected -2023-10-23T12:27:28.948Z,172.21.2.113,2764889,Connected to 10.1.0.2 -2023-10-23T12:15:03.360Z,172.21.2.162,3450233,Connected to 10.1.0.3 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 02a2cac0513c0..d59dda273ed6e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1314,7 +1314,7 @@ COUNT(c):long | a:integer 0 | 1 ; -countDistinctNull#[skip:-8.99.99,reason:not yet fixed] +countDistinctNull#[skip:-9.99.99,reason:not yet fixed] ROW a = 1, c = null | STATS COUNT_DISTINCT(c) BY a; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index c6a2d47a78dc9..3218962678d9f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -1351,3 +1351,54 @@ FROM sample_data, sample_data_ts_long null | 172.21.0.5 | 1232382 | 
Disconnected | Disconnected null | 172.21.0.5 | 1232382 | Disconnected | Disconnected ; + +shortIntegerWidening +required_capability: union_types +required_capability: metadata_fields +required_capability: casting_operator +required_capability: union_types_numeric_widening + +FROM apps, apps_short METADATA _index +| EVAL id = id::integer +| KEEP _index, id, version, name +| WHERE name == "aaaaa" OR name == "hhhhh" +| SORT _index ASC, id ASC +; + +_index:keyword | id:integer | version:version | name:keyword +apps | 1 | 1 | aaaaa +apps | 8 | 1.2.3.4 | hhhhh +apps | 12 | 1.2.3.4 | aaaaa +apps_short | 1 | 1 | aaaaa +apps_short | 8 | 1.2.3.4 | hhhhh +apps_short | 12 | 1.2.3.4 | aaaaa +; + +shortIntegerWideningStats +required_capability: union_types +required_capability: casting_operator +required_capability: union_types_numeric_widening + +FROM apps, apps_short +| EVAL id = id::integer +| STATS count=count() BY name, id +| KEEP id, name, count +| SORT id ASC, name ASC +; + +id:integer | name:keyword | count:long +1 | aaaaa | 2 +2 | bbbbb | 2 +3 | ccccc | 2 +4 | ddddd | 2 +5 | eeeee | 2 +6 | fffff | 2 +7 | ggggg | 2 +8 | hhhhh | 2 +9 | iiiii | 2 +10 | jjjjj | 2 +11 | kkkkk | 2 +12 | aaaaa | 2 +13 | lllll | 2 +14 | mmmmm | 2 +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index c0c5ebf010ffd..475e63733022d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -198,6 +198,11 @@ public enum Cap { */ UNION_TYPES_MISSING_FIELD, + /** + * Fix for widening of short numeric types in union-types. Done in #112610 + */ + UNION_TYPES_NUMERIC_WIDENING, + /** * Fix a parsing issue where numbers below Long.MIN_VALUE threw an exception instead of parsing as doubles. 
* see Parsing large numbers is inconsistent #104323 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4f9ef3df29a85..9288e1cf81a15 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -115,7 +115,6 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isTemporalAmount; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.LIMIT; @@ -1223,8 +1222,7 @@ private Expression resolveConvertFunction(AbstractConvertFunction convert, List< HashMap typeResolutions = new HashMap<>(); Set supportedTypes = convert.supportedTypes(); imf.types().forEach(type -> { - // TODO: Shouldn't we perform widening of small numerical types here? 
- if (supportedTypes.contains(type)) { + if (supportedTypes.contains(type.widenSmallNumeric())) { TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); var concreteConvert = typeSpecificConvert(convert, fa.source(), type, imf); typeResolutions.put(key, concreteConvert); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java index 2795ac857983c..f3ce6b1465d6b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java @@ -63,7 +63,7 @@ protected AbstractConvertFunction(StreamInput in) throws IOException { * Build the evaluator given the evaluator a multivalued field. 
*/ protected final ExpressionEvaluator.Factory evaluator(ExpressionEvaluator.Factory fieldEval) { - DataType sourceType = field().dataType(); + DataType sourceType = field().dataType().widenSmallNumeric(); var factory = factories().get(sourceType); if (factory == null) { throw EsqlIllegalArgumentException.illegalDataType(sourceType); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index a7d8c98a606b5..faf9d04532f1a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -54,6 +54,8 @@ import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.PreAnalyzer; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; @@ -308,8 +310,18 @@ protected void assertResults(ExpectedResults expected, ActualResults actual, boo // CsvTestUtils.logData(actual.values(), LOGGER); } - private static IndexResolution loadIndexResolution(String mappingName, String indexName) { + private static IndexResolution loadIndexResolution(String mappingName, String indexName, Map typeMapping) { var mapping = new TreeMap<>(loadMapping(mappingName)); + if ((typeMapping == null || typeMapping.isEmpty()) == false) { + for (var entry : typeMapping.entrySet()) { + if (mapping.containsKey(entry.getKey())) { + DataType dataType = DataType.fromTypeName(entry.getValue()); + EsField field = mapping.get(entry.getKey()); + EsField editedField = new EsField(field.getName(), dataType, field.getProperties(), 
field.isAggregatable()); + mapping.put(entry.getKey(), editedField); + } + } + } return IndexResolution.valid(new EsIndex(indexName, mapping, Map.of(indexName, IndexMode.STANDARD))); } @@ -320,7 +332,7 @@ private static EnrichResolution loadEnrichPolicies() { CsvTestsDataLoader.TestsDataset sourceIndex = CSV_DATASET_MAP.get(policy.getIndices().get(0)); // this could practically work, but it's wrong: // EnrichPolicyResolution should contain the policy (system) index, not the source index - EsIndex esIndex = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName()).get(); + EsIndex esIndex = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName(), null).get(); var concreteIndices = Map.of(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); enrichResolution.addResolvedPolicy( policyConfig.policyName(), @@ -349,7 +361,7 @@ private static EnrichPolicy loadEnrichPolicyMapping(String policyFileName) { } private LogicalPlan analyzedPlan(LogicalPlan parsed, CsvTestsDataLoader.TestsDataset dataset) { - var indexResolution = loadIndexResolution(dataset.mappingFileName(), dataset.indexName()); + var indexResolution = loadIndexResolution(dataset.mappingFileName(), dataset.indexName(), dataset.typeMapping()); var enrichPolicies = loadEnrichPolicies(); var analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indexResolution, enrichPolicies), TEST_VERIFIER); LogicalPlan plan = analyzer.analyze(parsed); @@ -392,7 +404,7 @@ private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) } private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDataLoader.TestsDataset dataset) throws Exception { - var testData = loadPageFromCsv(CsvTests.class.getResource("/" + dataset.dataFileName())); + var testData = loadPageFromCsv(CsvTests.class.getResource("/" + dataset.dataFileName()), dataset.typeMapping()); return new 
TestPhysicalOperationProviders(testData.v1(), testData.v2()); } diff --git a/x-pack/plugin/mapper-unsigned-long/build.gradle b/x-pack/plugin/mapper-unsigned-long/build.gradle index 9c04584cdf130..e011723da6230 100644 --- a/x-pack/plugin/mapper-unsigned-long/build.gradle +++ b/x-pack/plugin/mapper-unsigned-long/build.gradle @@ -37,15 +37,6 @@ restResources { } } -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - '50_script_values/Scripted fields values return Long', - '50_script_values/Scripted sort values', - '50_script_values/Script query', - '50_script_values/script_score query' - ].join(',') -} - if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index b19fa4ab5f185..bc22552d0d734 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -12,7 +12,7 @@ dependencies { testImplementation project(':x-pack:qa') } -Version ccsCompatVersion = new Version(VersionProperties.getElasticsearchVersion().getMajor(), VersionProperties.getElasticsearchVersion().getMinor() - 1, 0) +Version ccsCompatVersion = BuildParams.bwcVersions.minimumWireCompatibleVersion restResources { restApi { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index c9e860f27a5d4..19f2e984f6493 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -17,6 +17,7 @@ dependencies { javaRestTestImplementation project(path: xpackModule('rank-rrf')) javaRestTestImplementation project(path: xpackModule('esql-core')) javaRestTestImplementation project(path: xpackModule('esql')) 
+ javaRestTestImplementation project(path: xpackModule('snapshot-repo-test-kit')) } // location for keys and certificates diff --git a/x-pack/plugin/old-lucene-versions/src/test/java/org/elasticsearch/xpack/lucene/bwc/codecs/OldCodecsAvailableTests.java b/x-pack/plugin/old-lucene-versions/src/test/java/org/elasticsearch/xpack/lucene/bwc/codecs/OldCodecsAvailableTests.java index bf1538b4e5dd8..42b5ba83a0828 100644 --- a/x-pack/plugin/old-lucene-versions/src/test/java/org/elasticsearch/xpack/lucene/bwc/codecs/OldCodecsAvailableTests.java +++ b/x-pack/plugin/old-lucene-versions/src/test/java/org/elasticsearch/xpack/lucene/bwc/codecs/OldCodecsAvailableTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.lucene.bwc.codecs; import org.elasticsearch.Version; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ESTestCase; public class OldCodecsAvailableTests extends ESTestCase { @@ -16,6 +17,8 @@ public class OldCodecsAvailableTests extends ESTestCase { * Reminder to add Lucene BWC codecs under {@link org.elasticsearch.xpack.lucene.bwc.codecs} whenever Elasticsearch is upgraded * to the next major Lucene version. 
*/ + @UpdateForV9 + @AwaitsFix(bugUrl = "muted until we add bwc codecs as part of lucene 10 upgrade") public void testLuceneBWCCodecsAvailable() { assertEquals("Add Lucene BWC codecs for Elasticsearch version 7", 8, Version.CURRENT.major); } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java index 8c978c3445526..94ba06a00cc4e 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java @@ -538,14 +538,6 @@ protected InputStream openInputStreamFromBlobStore(final long position, final lo private SlicedInputStream openInputStreamMultipleParts(long position, long readLength) { final int startPart = getPartNumberForPosition(position); final int endPart = getPartNumberForPosition(position + readLength - 1); - - for (int currentPart = startPart; currentPart <= endPart; currentPart++) { - final long startInPart = (currentPart == startPart) ? getRelativePositionInPart(position) : 0L; - final long endInPart; - endInPart = currentPart == endPart ? getRelativePositionInPart(position + readLength - 1) + 1 : fileInfo.partBytes(currentPart); - stats.addBlobStoreBytesRequested(endInPart - startInPart); - } - return new SlicedInputStream(endPart - startPart + 1) { @Override protected InputStream openSlice(int slice) throws IOException { @@ -555,8 +547,15 @@ protected InputStream openSlice(int slice) throws IOException { endInPart = currentPart == endPart ? 
getRelativePositionInPart(position + readLength - 1) + 1 : fileInfo.partBytes(currentPart); + final long length = endInPart - startInPart; + stats.addBlobStoreBytesRequested(length); return directory.blobContainer() - .readBlob(OperationPurpose.SNAPSHOT_DATA, fileInfo.partName(currentPart), startInPart, endInPart - startInPart); + .readBlob(OperationPurpose.SNAPSHOT_DATA, fileInfo.partName(currentPart), startInPart, length); + } + + @Override + public boolean markSupported() { + return false; } }; } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java index 594d356becf87..bd090b528bb97 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgraderTests.java @@ -7,11 +7,13 @@ package org.elasticsearch.xpack.searchablesnapshots.upgrade; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -27,6 +29,8 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "this testing a number of pre 8.0 upgrade scenarios so needs updating or removal for 
9.0") public class SearchableSnapshotIndexMetadataUpgraderTests extends ESTestCase { public void testNoUpgradeNeeded() { diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index c5304d8313df2..853d0fd9318ae 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -98,6 +98,7 @@ public class Constants { "cluster:admin/snapshot/restore", "cluster:admin/snapshot/status", "cluster:admin/snapshot/status[nodes]", + "cluster:admin/repository/verify_integrity", "cluster:admin/features/get", "cluster:admin/features/reset", "cluster:admin/tasks/cancel", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java index 6777c38b809e0..01715cde33cf0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.Version; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import 
org.elasticsearch.index.IndexVersion; @@ -25,92 +25,13 @@ import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.XPackSettings; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class SecurityImplicitBehaviorBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Exception { - final BuildVersion previousVersion = toBuildVersion( - randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) - ) - ); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - Metadata metadata = createLicensesMetadata( - TrialLicenseVersion.fromXContent(previousVersion.toString()), - randomFrom("basic", "trial") - ); - License license = mock(License.class); - when(licenseService.getLicense(metadata)).thenReturn(license); - when(license.operationMode()).thenReturn(randomFrom(License.OperationMode.BASIC, License.OperationMode.TRIAL)); - BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext(Settings.EMPTY, metadata) - ); - assertThat(result.isFailure(), is(true)); - assertThat( - result.getMessage(), - equalTo( - "The default value for [" - + XPackSettings.SECURITY_ENABLED.getKey() - + "] has changed in the current version. " - + " Security features were implicitly disabled for this node but they would now be enabled, possibly" - + " preventing access to the node. 
" - + "See " - + ReferenceDocs.BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP - + " to configure security, or explicitly disable security by " - + "setting [xpack.security.enabled] to \"false\" in elasticsearch.yml before restarting the node." - ) - ); - } - - public void testUpgradeFrom7xWithImplicitSecuritySettingsOnGoldPlus() throws Exception { - final BuildVersion previousVersion = toBuildVersion( - randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) - ) - ); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - Metadata metadata = createLicensesMetadata( - TrialLicenseVersion.fromXContent(previousVersion.toString()), - randomFrom("gold", "platinum") - ); - License license = mock(License.class); - when(licenseService.getLicense(metadata)).thenReturn(license); - when(license.operationMode()).thenReturn(randomFrom(License.OperationMode.GOLD, License.OperationMode.PLATINUM)); - BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext(Settings.EMPTY, metadata) - ); - assertThat(result.isSuccess(), is(true)); - } - - public void testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { - final BuildVersion previousVersion = toBuildVersion( - randomValueOtherThan( - Version.V_8_0_0, - () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) - ) - ); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - 
BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext( - Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build(), - createLicensesMetadata(TrialLicenseVersion.fromXContent(previousVersion.toString()), randomFrom("basic", "trial")) - ) - ); - assertThat(result.isSuccess(), is(true)); - } - + @UpdateForV9 + @AwaitsFix(bugUrl = "requires updates for version 9.0 bump") public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); @@ -125,6 +46,8 @@ public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { assertThat(result.isSuccess(), is(true)); } + @UpdateForV9 + @AwaitsFix(bugUrl = "requires updates for version 9.0 bump") public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java index 851d5f8f02b2a..449edd8f40ee2 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java +++ 
b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -202,6 +203,8 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener { boolean shareFilesWithSource = randomBoolean(); @@ -387,6 +390,8 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener { Store.MetadataSnapshot targetMetadataSnapshot = generateRandomTargetState(store); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml new file mode 100644 index 0000000000000..be6929a15ff44 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/20_verify_integrity.yml @@ -0,0 +1,39 @@ +--- +setup: + - requires: + cluster_features: "snapshot.repository_verify_integrity" + reason: "required feature" + + - do: + snapshot.create_repository: + repository: test_repo + body: + type: fs + settings: + location: "test_repo_loc" + + - do: + bulk: + index: test + refresh: true + body: + - '{"index":{}}' + - '{}' + + - do: + snapshot.create: + repository: test_repo + snapshot: snap + wait_for_completion: true + +--- +"Integrity verification": + - do: + snapshot.repository_verify_integrity: + repository: test_repo + + - match: {results.result: pass} + - match: {results.status.snapshots.total: 1} + - match: {results.status.snapshots.verified: 1} + - match: {results.status.indices.total: 1} + - match: 
{results.status.indices.verified: 1} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityIT.java new file mode 100644 index 0000000000000..4b0e0fdbb0955 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityIT.java @@ -0,0 +1,806 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshotsIntegritySuppressor; +import org.elasticsearch.index.snapshots.blobstore.SnapshotFiles; 
+import org.elasticsearch.index.store.StoreFileMetadata; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.ShardGenerations; +import org.elasticsearch.repositories.blobstore.BlobStoreCorruptionUtils; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.RepositoryFileType; +import org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.OptionalLong; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; +import java.util.stream.LongStream; +import java.util.stream.StreamSupport; + +import static 
org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING; +import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.INDEX_SHARD_SNAPSHOTS_FORMAT; +import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.SNAPSHOT_FORMAT; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class RepositoryVerifyIntegrityIT extends AbstractSnapshotIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable HTTP + } + + @SuppressWarnings("unchecked") + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopyNoNullElements( + super.nodePlugins(), + SnapshotRepositoryTestKit.class, + MockTransportService.TestPlugin.class + ); + } + + private static long getCurrentTime(Function summarizer) { + return summarizer.apply( + StreamSupport.stream(internalCluster().getInstances(ThreadPool.class).spliterator(), false) + .mapToLong(ThreadPool::absoluteTimeInMillis) + ).orElseThrow(AssertionError::new); + } + + public void testSuccess() throws IOException { + final var minStartTimeMillis = getCurrentTime(LongStream::min); + final var testContext = createTestContext(); + final var request = testContext.getVerifyIntegrityRequest(); + if (randomBoolean()) { + request.addParameter("verify_blob_contents", null); + } + final var response = getRestClient().performRequest(request); + final var maxEndTimeMillis = getCurrentTime(LongStream::max); + 
assertEquals(200, response.getStatusLine().getStatusCode()); + final var responseObjectPath = ObjectPath.createFromResponse(response); + final var logEntryCount = responseObjectPath.evaluateArraySize("log"); + final var seenSnapshotNames = new HashSet(); + final var seenIndexNames = new HashSet(); + for (int i = 0; i < logEntryCount; i++) { + assertThat( + responseObjectPath.evaluate("log." + i + ".timestamp_in_millis"), + allOf(greaterThanOrEqualTo(minStartTimeMillis), lessThanOrEqualTo(maxEndTimeMillis)) + ); + assertThat( + responseObjectPath.evaluate("log." + i + ".timestamp"), + request.getParameters().containsKey("human") ? instanceOf(String.class) : nullValue() + ); + final String maybeSnapshotName = responseObjectPath.evaluate("log." + i + ".snapshot.snapshot"); + if (maybeSnapshotName != null) { + assertTrue(seenSnapshotNames.add(maybeSnapshotName)); + } else { + final String indexName = responseObjectPath.evaluate("log." + i + ".index.name"); + assertNotNull(indexName); + assertTrue(seenIndexNames.add(indexName)); + assertEquals( + testContext.snapshotNames().size(), + (int) responseObjectPath.evaluate("log." + i + ".snapshot_restorability.total_snapshot_count") + ); + assertEquals( + testContext.snapshotNames().size(), + (int) responseObjectPath.evaluate("log." 
+ i + ".snapshot_restorability.restorable_snapshot_count") + ); + } + } + assertEquals(Set.copyOf(testContext.snapshotNames()), seenSnapshotNames); + assertEquals(Set.copyOf(testContext.indexNames()), seenIndexNames); + + assertEquals(0, (int) responseObjectPath.evaluate("results.total_anomalies")); + assertEquals("pass", responseObjectPath.evaluate("results.result")); + } + + public void testTaskStatus() throws IOException { + final var testContext = createTestContext(); + + // use non-master node to coordinate the request so that we can capture chunks being sent back + final var coordNodeName = getCoordinatingNodeName(); + final var coordNodeTransportService = MockTransportService.getInstance(coordNodeName); + final var masterTaskManager = MockTransportService.getInstance(internalCluster().getMasterName()).getTaskManager(); + + final SubscribableListener snapshotsCompleteStatusListener = new SubscribableListener<>(); + final AtomicInteger chunksSeenCounter = new AtomicInteger(); + + coordNodeTransportService.addRequestHandlingBehavior( + TransportRepositoryVerifyIntegrityResponseChunkAction.ACTION_NAME, + (handler, request, channel, task) -> { + final SubscribableListener unblockChunkHandlingListener = switch (request.chunkContents().type()) { + case START_RESPONSE -> { + final var status = asInstanceOf( + RepositoryVerifyIntegrityTask.Status.class, + randomBoolean() + ? 
masterTaskManager.getTask(task.getParentTaskId().getId()).getStatus() + : client().admin() + .cluster() + .prepareGetTask(task.getParentTaskId()) + .get(SAFE_AWAIT_TIMEOUT) + .getTask() + .getTask() + .status() + ); + assertEquals(testContext.repositoryName(), status.repositoryName()); + assertEquals(testContext.snapshotNames().size(), status.snapshotCount()); + assertEquals(0L, status.snapshotsVerified()); + assertEquals(testContext.indexNames().size(), status.indexCount()); + assertEquals(0L, status.indicesVerified()); + assertEquals(testContext.indexNames().size() * testContext.snapshotNames().size(), status.indexSnapshotCount()); + assertEquals(0L, status.indexSnapshotsVerified()); + assertEquals(0L, status.blobsVerified()); + assertEquals(0L, status.blobBytesVerified()); + yield SubscribableListener.newSucceeded(null); + } + case INDEX_RESTORABILITY -> { + // several of these chunks might arrive concurrently; we want to verify the task status before processing any of + // them, so use SubscribableListener to pick out the first status + snapshotsCompleteStatusListener.onResponse( + asInstanceOf( + RepositoryVerifyIntegrityTask.Status.class, + masterTaskManager.getTask(task.getParentTaskId().getId()).getStatus() + ) + ); + yield snapshotsCompleteStatusListener.andThenAccept(status -> { + assertEquals(testContext.repositoryName(), status.repositoryName()); + assertEquals(testContext.snapshotNames().size(), status.snapshotCount()); + assertEquals(testContext.snapshotNames().size(), status.snapshotsVerified()); + assertEquals(testContext.indexNames().size(), status.indexCount()); + assertEquals(0L, status.indicesVerified()); + }); + } + case SNAPSHOT_INFO -> SubscribableListener.newSucceeded(null); + case ANOMALY -> fail(null, "should not see anomalies"); + }; + + unblockChunkHandlingListener.addListener(ActionTestUtils.assertNoFailureListener(ignored -> { + chunksSeenCounter.incrementAndGet(); + handler.messageReceived(request, channel, task); + })); + } + ); + + 
try (var client = createRestClient(coordNodeName)) { + final var response = client.performRequest(testContext.getVerifyIntegrityRequest()); + assertEquals(1 + testContext.indexNames().size() + testContext.snapshotNames().size(), chunksSeenCounter.get()); + assertEquals(200, response.getStatusLine().getStatusCode()); + final var responseObjectPath = ObjectPath.createFromResponse(response); + assertEquals(0, (int) responseObjectPath.evaluate("results.total_anomalies")); + assertEquals("pass", responseObjectPath.evaluate("results.result")); + } finally { + coordNodeTransportService.clearAllRules(); + } + } + + public void testShardSnapshotFailed() throws IOException { + final var testContext = createTestContext(); + + final var newIndex = randomIdentifier(); + assertAcked( + client().admin() + .indices() + .prepareCreate(newIndex) + .setWaitForActiveShards(ActiveShardCount.NONE) + .setSettings(indexSettings(1, 0).put(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_id", "not-a-node-id")) + ); + + final var createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, testContext.repositoryName(), randomIdentifier()) + .setWaitForCompletion(true) + .setPartial(true) + .get(); + + assertEquals(SnapshotState.PARTIAL, createSnapshotResponse.getSnapshotInfo().state()); + + final var takeGoodSnapshot = randomBoolean(); + if (takeGoodSnapshot) { + updateIndexSettings(Settings.builder().putNull(INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_id"), newIndex); + ensureGreen(newIndex); + createSnapshot(testContext.repositoryName(), randomIdentifier(), List.of(newIndex)); + } + + final Response response; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + response = getRestClient().performRequest(testContext.getVerifyIntegrityRequest()); + } finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + 
assertEquals(200, response.getStatusLine().getStatusCode()); + final var responseObjectPath = ObjectPath.createFromResponse(response); + assertThat(getAnomalies(responseObjectPath), equalTo(Set.of())); + assertEquals(0, (int) responseObjectPath.evaluate("results.total_anomalies")); + assertEquals("pass", responseObjectPath.evaluate("results.result")); + + final var logEntryCount = responseObjectPath.evaluateArraySize("log"); + for (int i = 0; i < logEntryCount; i++) { + if (newIndex.equals(responseObjectPath.evaluate("log." + i + ".index.name"))) { + assertEquals( + takeGoodSnapshot ? 2 : 1, + (int) responseObjectPath.evaluate("log." + i + ".snapshot_restorability.total_snapshot_count") + ); + assertEquals( + takeGoodSnapshot ? 1 : 0, + (int) responseObjectPath.evaluate("log." + i + ".snapshot_restorability.restorable_snapshot_count") + ); + } + } + } + + public void testCorruption() throws IOException { + final var testContext = createTestContext(); + + final Response response; + final Path corruptedFile; + final RepositoryFileType corruptedFileType; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + corruptedFile = BlobStoreCorruptionUtils.corruptRandomFile(testContext.repositoryRootPath()); + corruptedFileType = RepositoryFileType.getRepositoryFileType(testContext.repositoryRootPath(), corruptedFile); + logger.info("--> corrupted file: {}", corruptedFile); + logger.info("--> corrupted file type: {}", corruptedFileType); + + final var request = testContext.getVerifyIntegrityRequest(); + if (corruptedFileType == RepositoryFileType.SHARD_DATA || randomBoolean()) { + request.addParameter("verify_blob_contents", null); + } + response = getRestClient().performRequest(request); + } finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + assertEquals(200, response.getStatusLine().getStatusCode()); + final var responseObjectPath = 
ObjectPath.createFromResponse(response); + final var logEntryCount = responseObjectPath.evaluateArraySize("log"); + final var anomalies = new HashSet(); + final var seenIndexNames = new HashSet(); + int fullyRestorableIndices = 0; + for (int i = 0; i < logEntryCount; i++) { + final String maybeAnomaly = responseObjectPath.evaluate("log." + i + ".anomaly"); + if (maybeAnomaly != null) { + anomalies.add(maybeAnomaly); + } else { + final String indexName = responseObjectPath.evaluate("log." + i + ".index.name"); + if (indexName != null) { + assertTrue(seenIndexNames.add(indexName)); + assertThat(testContext.indexNames(), hasItem(indexName)); + final int totalSnapshots = responseObjectPath.evaluate("log." + i + ".snapshot_restorability.total_snapshot_count"); + final int restorableSnapshots = responseObjectPath.evaluate( + "log." + i + ".snapshot_restorability.restorable_snapshot_count" + ); + if (totalSnapshots == restorableSnapshots) { + fullyRestorableIndices += 1; + } + } + } + } + + assertThat( + fullyRestorableIndices, + corruptedFileType == RepositoryFileType.SHARD_GENERATION || corruptedFileType.equals(RepositoryFileType.GLOBAL_METADATA) + ? 
equalTo(testContext.indexNames().size()) + : lessThan(testContext.indexNames().size()) + ); + assertThat(anomalies, not(empty())); + assertThat(responseObjectPath.evaluate("results.total_anomalies"), greaterThanOrEqualTo(anomalies.size())); + assertEquals("fail", responseObjectPath.evaluate("results.result")); + + // remove permitted/expected anomalies to verify that no unexpected ones were seen + switch (corruptedFileType) { + case SNAPSHOT_INFO -> anomalies.remove("failed to load snapshot info"); + case GLOBAL_METADATA -> anomalies.remove("failed to load global metadata"); + case INDEX_METADATA -> anomalies.remove("failed to load index metadata"); + case SHARD_GENERATION -> anomalies.remove("failed to load shard generation"); + case SHARD_SNAPSHOT_INFO -> anomalies.remove("failed to load shard snapshot"); + case SHARD_DATA -> { + anomalies.remove("missing blob"); + anomalies.remove("mismatched blob length"); + anomalies.remove("corrupt data blob"); + } + } + assertThat(anomalies, empty()); + } + + public void testTransportException() throws IOException { + final var testContext = createTestContext(); + + // use non-master node to coordinate the request so that we can capture chunks being sent back + final var coordNodeName = getCoordinatingNodeName(); + final var coordNodeTransportService = MockTransportService.getInstance(coordNodeName); + final var masterTransportService = MockTransportService.getInstance(internalCluster().getMasterName()); + + final var messageCount = 2 // request & response + * (1 // forward to master + + 1 // start response + + testContext.indexNames().size() + testContext.snapshotNames().size()); + final var failureStep = between(1, messageCount); + + final var failTransportMessageBehaviour = new StubbableTransport.RequestHandlingBehavior<>() { + final AtomicInteger currentStep = new AtomicInteger(); + + @Override + public void messageReceived( + TransportRequestHandler handler, + TransportRequest request, + TransportChannel channel, + Task 
task + ) throws Exception { + if (currentStep.incrementAndGet() == failureStep) { + throw new ElasticsearchException("simulated"); + } else { + handler.messageReceived(request, new TransportChannel() { + @Override + public String getProfileName() { + return "test"; + } + + @Override + public void sendResponse(TransportResponse response) { + if (currentStep.incrementAndGet() == failureStep) { + channel.sendResponse(new ElasticsearchException("simulated")); + } else { + channel.sendResponse(response); + } + } + + @Override + public void sendResponse(Exception exception) { + if (currentStep.incrementAndGet() == failureStep) { + throw new AssertionError("shouldn't have failed yet"); + } else { + channel.sendResponse(exception); + } + } + }, task); + } + } + }; + + masterTransportService.addRequestHandlingBehavior( + TransportRepositoryVerifyIntegrityAction.ACTION_NAME, + failTransportMessageBehaviour + ); + + coordNodeTransportService.addRequestHandlingBehavior( + TransportRepositoryVerifyIntegrityResponseChunkAction.ACTION_NAME, + failTransportMessageBehaviour + ); + + final var request = testContext.getVerifyIntegrityRequest(); + if (failureStep <= 2) { + request.addParameter("ignore", "500"); + } + final Response response; + try (var restClient = createRestClient(coordNodeName)) { + response = restClient.performRequest(request); + } + final var responseObjectPath = ObjectPath.createFromResponse(response); + if (failureStep <= 2) { + assertEquals(500, response.getStatusLine().getStatusCode()); + assertNotNull(responseObjectPath.evaluate("error")); + assertEquals(500, (int) responseObjectPath.evaluate("status")); + } else { + assertEquals(200, response.getStatusLine().getStatusCode()); + assertNotNull(responseObjectPath.evaluate("log")); + assertNotNull(responseObjectPath.evaluate("exception")); + } + + assertNull(responseObjectPath.evaluate("results")); + } + + public void testBadSnapshotInfo() throws IOException { + final var testContext = createTestContext(); + + 
final var snapshotInfoBlob = BlobStoreCorruptionUtils.getRandomFileToCorrupt( + testContext.repositoryRootPath(), + RepositoryFileType.SNAPSHOT_INFO + ); + + final SnapshotInfo snapshotInfo; + try (var inputStream = Files.newInputStream(snapshotInfoBlob)) { + snapshotInfo = SNAPSHOT_FORMAT.deserialize(testContext.repositoryName(), xContentRegistry(), inputStream); + } + + final var newIndices = new ArrayList<>(snapshotInfo.indices()); + newIndices.remove(between(0, newIndices.size() - 1)); + + final Response response; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + try (var outputStream = Files.newOutputStream(snapshotInfoBlob)) { + SNAPSHOT_FORMAT.serialize( + new SnapshotInfo( + snapshotInfo.snapshot(), + newIndices, + snapshotInfo.dataStreams(), + snapshotInfo.featureStates(), + snapshotInfo.reason(), + snapshotInfo.version(), + snapshotInfo.startTime(), + snapshotInfo.endTime(), + snapshotInfo.totalShards(), + snapshotInfo.successfulShards(), + snapshotInfo.shardFailures(), + snapshotInfo.includeGlobalState(), + snapshotInfo.userMetadata(), + snapshotInfo.state(), + snapshotInfo.indexSnapshotDetails() + ), + snapshotInfoBlob.toString(), + randomBoolean(), + outputStream + ); + } + + response = getRestClient().performRequest(testContext.getVerifyIntegrityRequest()); + } finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + assertEquals(200, response.getStatusLine().getStatusCode()); + assertThat(getAnomalies(ObjectPath.createFromResponse(response)), equalTo(Set.of("snapshot contents mismatch"))); + } + + public void testShardPathEmpty() throws IOException { + final var testContext = createTestContext(); + + final var shardPath = BlobStoreCorruptionUtils.getRandomFileToCorrupt( + testContext.repositoryRootPath(), + RepositoryFileType.SHARD_GENERATION + ).getParent(); + + final Response response; + try (var ignored = new 
BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + IOUtils.rm(shardPath); + response = getRestClient().performRequest(testContext.getVerifyIntegrityRequest()); + } finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + assertEquals(200, response.getStatusLine().getStatusCode()); + assertThat(getAnomalies(ObjectPath.createFromResponse(response)), equalTo(Set.of("failed to load shard snapshot"))); + } + + public void testShardPathUnreadable() throws IOException { + final var testContext = createTestContext(); + + final var shardPath = BlobStoreCorruptionUtils.getRandomFileToCorrupt( + testContext.repositoryRootPath(), + RepositoryFileType.SHARD_GENERATION + ).getParent(); + + final Response response; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + IOUtils.rm(shardPath); + Files.write(shardPath, new byte[0], StandardOpenOption.CREATE_NEW); + response = getRestClient().performRequest(testContext.getVerifyIntegrityRequest()); + } finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + assertEquals(200, response.getStatusLine().getStatusCode()); + assertThat(getAnomalies(ObjectPath.createFromResponse(response)), equalTo(Set.of("failed to list shard container contents"))); + } + + public void testShardGenerationMissing() throws IOException { + final var testContext = createTestContext(); + + final var repository = asInstanceOf( + BlobStoreRepository.class, + internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(testContext.repositoryName()) + ); + final var repoSettings = repository.getMetadata().settings(); + + final RepositoryData repositoryData = safeAwait(l -> repository.getRepositoryData(EsExecutors.DIRECT_EXECUTOR_SERVICE, l)); + + assertAcked( + 
client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + + final var rootBlob = BlobStoreCorruptionUtils.getRandomFileToCorrupt( + testContext.repositoryRootPath(), + RepositoryFileType.ROOT_INDEX_N + ); + + final var indexToBreak = randomFrom(testContext.indexNames()); + final var newShardGenerations = ShardGenerations.builder(); + for (final var index : repositoryData.shardGenerations().indices()) { + final var indexShardGenerations = repositoryData.shardGenerations().getGens(index); + for (int i = 0; i < indexShardGenerations.size(); i++) { + if (i > 0 || index.getName().equals(indexToBreak) == false) { + newShardGenerations.put(index, i, indexShardGenerations.get(i)); + } + } + } + + final var brokenRepositoryData = new RepositoryData( + repositoryData.getUuid(), + repositoryData.getGenId(), + repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())), + repositoryData.getSnapshotIds().stream().collect(Collectors.toMap(SnapshotId::getUUID, repositoryData::getSnapshotDetails)), + repositoryData.getIndices().values().stream().collect(Collectors.toMap(Function.identity(), repositoryData::getSnapshots)), + newShardGenerations.build(), + repositoryData.indexMetaDataGenerations(), + repositoryData.getClusterUUID() + ); + + final Response response; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + + Files.write( + rootBlob, + BytesReference.toBytes( + BytesReference.bytes(brokenRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), IndexVersion.current())) + ), + StandardOpenOption.TRUNCATE_EXISTING + ); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + .setType(FsRepository.TYPE) + .setSettings(repoSettings) + ); + + response = getRestClient().performRequest(testContext.getVerifyIntegrityRequest()); + } 
finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + assertEquals(200, response.getStatusLine().getStatusCode()); + assertThat(getAnomalies(ObjectPath.createFromResponse(response)), equalTo(Set.of("shard generation not defined"))); + } + + public void testSnapshotNotInShardGeneration() throws IOException { + final var testContext = createTestContext(); + runInconsistentShardGenerationBlobTest( + testContext, + blobStoreIndexShardSnapshots -> blobStoreIndexShardSnapshots.withRetainedSnapshots( + testContext.snapshotNames().stream().skip(1).map(n -> new SnapshotId(n, "_na_")).collect(Collectors.toSet()) + ), + "snapshot not in shard generation" + ); + } + + public void testBlobInShardGenerationButNotSnapshot() throws IOException { + final var testContext = createTestContext(); + final var snapshotToUpdate = randomFrom(testContext.snapshotNames()); + runInconsistentShardGenerationBlobTest(testContext, blobStoreIndexShardSnapshots -> { + BlobStoreIndexShardSnapshots result = BlobStoreIndexShardSnapshots.EMPTY; + for (final var snapshotFiles : blobStoreIndexShardSnapshots.snapshots()) { + if (snapshotFiles.snapshot().equals(snapshotToUpdate)) { + result = result.withAddedSnapshot( + new SnapshotFiles( + snapshotToUpdate, + CollectionUtils.appendToCopy( + snapshotFiles.indexFiles(), + new BlobStoreIndexShardSnapshot.FileInfo( + "extra", + new StoreFileMetadata("extra", 1L, "checksum", Version.CURRENT.toString()), + ByteSizeValue.ONE + ) + ), + snapshotFiles.shardStateIdentifier() + ) + ); + } else { + result = result.withAddedSnapshot(snapshotFiles); + } + } + return result; + }, "blob in shard generation but not snapshot"); + } + + public void testSnapshotShardGenerationMismatch() throws IOException { + final var testContext = createTestContext(); + runInconsistentShardGenerationBlobTest(testContext, blobStoreIndexShardSnapshots -> { + final var fileToUpdate = 
randomFrom(blobStoreIndexShardSnapshots.iterator().next().indexFiles()); + final var updatedFile = new BlobStoreIndexShardSnapshot.FileInfo( + fileToUpdate.name(), + fileToUpdate.metadata(), + ByteSizeValue.ONE + ); + assertFalse(fileToUpdate.isSame(updatedFile)); + + BlobStoreIndexShardSnapshots result = BlobStoreIndexShardSnapshots.EMPTY; + for (final var snapshotFiles : blobStoreIndexShardSnapshots.snapshots()) { + result = result.withAddedSnapshot( + new SnapshotFiles( + snapshotFiles.snapshot(), + snapshotFiles.indexFiles() + .stream() + .map(fileInfo -> fileInfo.name().equals(fileToUpdate.name()) ? updatedFile : fileInfo) + .toList(), + snapshotFiles.shardStateIdentifier() + ) + ); + } + return result; + }, "snapshot shard generation mismatch"); + } + + public void testBlobInSnapshotNotShardGeneration() throws IOException { + final var testContext = createTestContext(); + final var snapshotToUpdate = randomFrom(testContext.snapshotNames()); + runInconsistentShardGenerationBlobTest(testContext, blobStoreIndexShardSnapshots -> { + BlobStoreIndexShardSnapshots result = BlobStoreIndexShardSnapshots.EMPTY; + for (final var snapshotFiles : blobStoreIndexShardSnapshots.snapshots()) { + if (snapshotFiles.snapshot().equals(snapshotToUpdate)) { + final var indexFilesCopy = new ArrayList<>(snapshotFiles.indexFiles()); + indexFilesCopy.remove(between(0, indexFilesCopy.size() - 1)); + result = result.withAddedSnapshot( + new SnapshotFiles(snapshotToUpdate, indexFilesCopy, snapshotFiles.shardStateIdentifier()) + ); + } else { + result = result.withAddedSnapshot(snapshotFiles); + } + } + return result; + }, "blob in snapshot but not shard generation"); + } + + private void runInconsistentShardGenerationBlobTest( + TestContext testContext, + UnaryOperator shardGenerationUpdater, + String expectedAnomaly + ) throws IOException { + + final var shardGenerationBlob = BlobStoreCorruptionUtils.getRandomFileToCorrupt( + testContext.repositoryRootPath(), + 
RepositoryFileType.SHARD_GENERATION + ); + + final BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots; + try (var inputStream = Files.newInputStream(shardGenerationBlob)) { + blobStoreIndexShardSnapshots = INDEX_SHARD_SNAPSHOTS_FORMAT.deserialize( + testContext.repositoryName(), + xContentRegistry(), + inputStream + ); + } + + final Response response; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + try (var outputStream = Files.newOutputStream(shardGenerationBlob)) { + INDEX_SHARD_SNAPSHOTS_FORMAT.serialize( + shardGenerationUpdater.apply(blobStoreIndexShardSnapshots), + shardGenerationBlob.toString(), + randomBoolean(), + outputStream + ); + } + response = getRestClient().performRequest(testContext.getVerifyIntegrityRequest()); + } finally { + assertAcked( + client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, testContext.repositoryName()) + ); + } + assertEquals(200, response.getStatusLine().getStatusCode()); + assertThat(getAnomalies(ObjectPath.createFromResponse(response)), equalTo(Set.of(expectedAnomaly))); + } + + private Set getAnomalies(ObjectPath responseObjectPath) throws IOException { + final var logEntryCount = responseObjectPath.evaluateArraySize("log"); + final var anomalies = new HashSet(); + for (int i = 0; i < logEntryCount; i++) { + final String maybeAnomaly = responseObjectPath.evaluate("log." 
+ i + ".anomaly"); + if (maybeAnomaly != null) { + anomalies.add(maybeAnomaly); + } + } + + assertThat(responseObjectPath.evaluate("results.total_anomalies"), greaterThanOrEqualTo(anomalies.size())); + if (anomalies.size() > 0) { + assertEquals("fail", responseObjectPath.evaluate("results.result")); + } + + return anomalies; + } + + private record TestContext(String repositoryName, Path repositoryRootPath, List indexNames, List snapshotNames) { + Request getVerifyIntegrityRequest() { + final var request = new Request("POST", "/_snapshot/" + repositoryName + "/_verify_integrity"); + if (randomBoolean()) { + request.addParameter("human", null); + } + if (randomBoolean()) { + request.addParameter("pretty", null); + } + return request; + } + } + + private TestContext createTestContext() { + final var repositoryName = randomIdentifier(); + final var repositoryRootPath = randomRepoPath(); + + createRepository(repositoryName, FsRepository.TYPE, repositoryRootPath); + + final var indexNames = randomList(1, 3, ESTestCase::randomIdentifier); + for (var indexName : indexNames) { + createIndexWithRandomDocs(indexName, between(1, 100)); + flushAndRefresh(indexName); + } + + final var snapshotNames = randomList(1, 3, ESTestCase::randomIdentifier); + for (var snapshotName : snapshotNames) { + createSnapshot(repositoryName, snapshotName, indexNames); + } + + return new TestContext(repositoryName, repositoryRootPath, indexNames, snapshotNames); + } + + private static String getCoordinatingNodeName() { + if (internalCluster().size() == 1) { + internalCluster().startNode(); + } + return randomValueOtherThan(internalCluster().getMasterName(), () -> internalCluster().getRandomNodeName()); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java new file mode 100644 index 0000000000000..70385cdc4cf04 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/module-info.java 
@@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +module org.elasticsearch.repositories.blobstore.testkit { + requires org.elasticsearch.base; + requires org.elasticsearch.server; + requires org.elasticsearch.xcontent; + requires org.elasticsearch.xcore; + + requires org.apache.logging.log4j; + requires org.apache.lucene.core; + requires org.elasticsearch.logging; + + exports org.elasticsearch.repositories.blobstore.testkit.analyze; + exports org.elasticsearch.repositories.blobstore.testkit.integrity; + + provides org.elasticsearch.features.FeatureSpecification + with + org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKitFeatures; +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java index 04d59906e6db3..b0ae1b0752b71 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java @@ -22,8 +22,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.testkit.analyze.RepositoryAnalyzeAction; import org.elasticsearch.repositories.blobstore.testkit.analyze.RestRepositoryAnalyzeAction; +import org.elasticsearch.repositories.blobstore.testkit.integrity.RepositoryVerifyIntegrityTask; +import org.elasticsearch.repositories.blobstore.testkit.integrity.RestRepositoryVerifyIntegrityAction; +import 
org.elasticsearch.repositories.blobstore.testkit.integrity.TransportRepositoryVerifyIntegrityCoordinationAction; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.tasks.Task; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -35,7 +39,13 @@ public class SnapshotRepositoryTestKit extends Plugin implements ActionPlugin { @Override public List> getActions() { - return List.of(new ActionHandler<>(RepositoryAnalyzeAction.INSTANCE, RepositoryAnalyzeAction.class)); + return List.of( + new ActionHandler<>(RepositoryAnalyzeAction.INSTANCE, RepositoryAnalyzeAction.class), + new ActionHandler<>( + TransportRepositoryVerifyIntegrityCoordinationAction.INSTANCE, + TransportRepositoryVerifyIntegrityCoordinationAction.class + ) + ); } @Override @@ -50,7 +60,7 @@ public List getRestHandlers( Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return List.of(new RestRepositoryAnalyzeAction()); + return List.of(new RestRepositoryAnalyzeAction(), new RestRepositoryVerifyIntegrityAction()); } public static void humanReadableNanos(XContentBuilder builder, String rawFieldName, String readableFieldName, long nanos) @@ -63,4 +73,15 @@ public static void humanReadableNanos(XContentBuilder builder, String rawFieldNa builder.field(rawFieldName, nanos); } + + @Override + public List getNamedWriteables() { + return List.of( + new NamedWriteableRegistry.Entry( + Task.Status.class, + RepositoryVerifyIntegrityTask.Status.NAME, + RepositoryVerifyIntegrityTask.Status::new + ) + ); + } } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKitFeatures.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKitFeatures.java new file mode 100644 index 0000000000000..cc513a948519b --- /dev/null +++ 
b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKitFeatures.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +import static org.elasticsearch.repositories.blobstore.testkit.integrity.RestRepositoryVerifyIntegrityAction.REPOSITORY_VERIFY_INTEGRITY_FEATURE; + +public class SnapshotRepositoryTestKitFeatures implements FeatureSpecification { + @Override + public Set getFeatures() { + return Set.of(REPOSITORY_VERIFY_INTEGRITY_FEATURE); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/ActiveRepositoryVerifyIntegrityTasks.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/ActiveRepositoryVerifyIntegrityTasks.java new file mode 100644 index 0000000000000..ac410465c3deb --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/ActiveRepositoryVerifyIntegrityTasks.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +import java.util.Map; + +/** + * The repository-verify-integrity tasks that this node is currently coordinating. + */ +class ActiveRepositoryVerifyIntegrityTasks { + + private final Map responseStreamsByCoordinatingTaskId = ConcurrentCollections + .newConcurrentMap(); + + Releasable registerResponseBuilder(long coordinatingTaskId, RepositoryVerifyIntegrityResponseStream responseStream) { + assert responseStream.hasReferences(); // ref held until the REST-layer listener is completed + + final var previous = responseStreamsByCoordinatingTaskId.putIfAbsent(coordinatingTaskId, responseStream); + if (previous != null) { + final var exception = new IllegalStateException("already executing verify task [" + coordinatingTaskId + "]"); + assert false : exception; + throw exception; + } + + return Releasables.assertOnce(() -> { + final var removed = responseStreamsByCoordinatingTaskId.remove(coordinatingTaskId, responseStream); + if (removed == false) { + final var exception = new IllegalStateException("already completed verify task [" + coordinatingTaskId + "]"); + assert false : exception; + throw exception; + } + }); + } + + /** + * Obtain the response stream for the given coordinating-node task ID, and increment its refcount. 
+ * @throws ResourceNotFoundException if the task is not running or its refcount already reached zero (likely because it completed) + */ + RepositoryVerifyIntegrityResponseStream acquireResponseStream(long taskId) { + final var outerRequest = responseStreamsByCoordinatingTaskId.get(taskId); + if (outerRequest == null || outerRequest.tryIncRef() == false) { + throw new ResourceNotFoundException("verify task [" + taskId + "] not found"); + } + return outerRequest; + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/IndexDescription.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/IndexDescription.java new file mode 100644 index 0000000000000..e13d970346868 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/IndexDescription.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Details of an index in a specific snapshot, identifying its corresponding {@link org.elasticsearch.cluster.metadata.IndexMetadata} blob + * and the number of shards. 
+ */ +public record IndexDescription(IndexId indexId, @Nullable String indexMetadataBlob, int shardCount) implements Writeable, ToXContentObject { + + public IndexDescription { + if (indexId == null || shardCount < 0) { + throw new IllegalArgumentException("invalid IndexDescription"); + } + } + + public IndexDescription(StreamInput in) throws IOException { + this(new IndexId(in), in.readOptionalString(), in.readVInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + indexId.writeTo(out); + out.writeOptionalString(indexMetadataBlob); + out.writeVInt(shardCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", indexId.getName()); + builder.field("uuid", indexId.getId()); + if (indexMetadataBlob != null) { + builder.field("metadata_blob", indexMetadataBlob); + } + if (shardCount > 0) { + builder.field("shards", shardCount); + } + return builder.endObject(); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryIntegrityVerifier.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryIntegrityVerifier.java new file mode 100644 index 0000000000000..a5c81d18071fc --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryIntegrityVerifier.java @@ -0,0 +1,949 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.RateLimiter; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.blobstore.OperationPurpose; +import org.elasticsearch.common.blobstore.support.BlobMetadata; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; +import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; +import org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream; +import org.elasticsearch.index.snapshots.blobstore.SlicedInputStream; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.repositories.ShardGeneration; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotId; +import 
org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; +import java.util.function.BooleanSupplier; +import java.util.function.LongSupplier; + +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; + +class RepositoryIntegrityVerifier { + private static final Logger logger = LogManager.getLogger(RepositoryIntegrityVerifier.class); + + private final LongSupplier currentTimeMillisSupplier; + private final BlobStoreRepository blobStoreRepository; + private final RepositoryVerifyIntegrityResponseChunk.Writer responseChunkWriter; + private final String repositoryName; + private final RepositoryVerifyIntegrityParams requestParams; + private final RepositoryData repositoryData; + private final BooleanSupplier isCancelledSupplier; + private final CancellableRunner metadataTaskRunner; + private final CancellableRunner snapshotTaskRunner; + private final RateLimiter rateLimiter; + + private final Set unreadableSnapshotInfoUuids = ConcurrentCollections.newConcurrentSet(); + private final long snapshotCount; + private final AtomicLong snapshotProgress = new AtomicLong(); + private final long indexCount; + private final AtomicLong indexProgress = new AtomicLong(); + private final long indexSnapshotCount; + private final AtomicLong indexSnapshotProgress = new AtomicLong(); + private final AtomicLong blobsVerified = new AtomicLong(); + private final AtomicLong blobBytesVerified = new AtomicLong(); + private final AtomicLong throttledNanos; + private final AtomicLong failedShardSnapshotsCount = new 
AtomicLong(); + private final Set failedShardSnapshotDescriptions = ConcurrentCollections.newConcurrentSet(); + + RepositoryIntegrityVerifier( + LongSupplier currentTimeMillisSupplier, + BlobStoreRepository blobStoreRepository, + RepositoryVerifyIntegrityResponseChunk.Writer responseChunkWriter, + RepositoryVerifyIntegrityParams requestParams, + RepositoryData repositoryData, + CancellableThreads cancellableThreads + ) { + this.currentTimeMillisSupplier = currentTimeMillisSupplier; + this.blobStoreRepository = blobStoreRepository; + this.repositoryName = blobStoreRepository.getMetadata().name(); + this.responseChunkWriter = responseChunkWriter; + this.requestParams = requestParams; + this.repositoryData = repositoryData; + this.isCancelledSupplier = cancellableThreads::isCancelled; + this.snapshotTaskRunner = new CancellableRunner( + new ThrottledTaskRunner( + "verify-blob", + requestParams.blobThreadPoolConcurrency(), + blobStoreRepository.threadPool().executor(ThreadPool.Names.SNAPSHOT) + ), + cancellableThreads + ); + this.metadataTaskRunner = new CancellableRunner( + new ThrottledTaskRunner( + "verify-metadata", + requestParams.metaThreadPoolConcurrency(), + blobStoreRepository.threadPool().executor(ThreadPool.Names.SNAPSHOT_META) + ), + cancellableThreads + ); + + this.snapshotCount = repositoryData.getSnapshotIds().size(); + this.indexCount = repositoryData.getIndices().size(); + this.indexSnapshotCount = repositoryData.getIndexSnapshotCount(); + this.rateLimiter = new RateLimiter.SimpleRateLimiter(requestParams.maxBytesPerSec().getMbFrac()); + + this.throttledNanos = new AtomicLong(requestParams.verifyBlobContents() ? 
1 : 0); // nonzero if verifying so status reported + } + + RepositoryVerifyIntegrityTask.Status getStatus() { + return new RepositoryVerifyIntegrityTask.Status( + repositoryName, + repositoryData.getGenId(), + repositoryData.getUuid(), + snapshotCount, + snapshotProgress.get(), + indexCount, + indexProgress.get(), + indexSnapshotCount, + indexSnapshotProgress.get(), + blobsVerified.get(), + blobBytesVerified.get(), + throttledNanos.get() + ); + } + + void start(ActionListener listener) { + logger.info( + """ + [{}] verifying metadata integrity for index generation [{}]: \ + repo UUID [{}], cluster UUID [{}], snapshots [{}], indices [{}], index snapshots [{}]""", + repositoryName, + repositoryData.getGenId(), + repositoryData.getUuid(), + repositoryData.getClusterUUID(), + getSnapshotCount(), + getIndexCount(), + getIndexSnapshotCount() + ); + + SubscribableListener + // first verify the top-level properties of the snapshots + .newForked(this::verifySnapshots) + .andThen(this::checkFailedShardSnapshotCount) + // then verify the restorability of each index + .andThen(this::verifyIndices) + .andThenAccept(v -> this.ensureNotCancelled()) + // see if the repository data has changed + .andThen( + l -> blobStoreRepository.getRepositoryData(blobStoreRepository.threadPool().executor(ThreadPool.Names.MANAGEMENT), l) + ) + // log the completion and return the result + .addListener(new ActionListener<>() { + @Override + public void onResponse(RepositoryData finalRepositoryData) { + logger.info( + "[{}] completed verifying metadata integrity for index generation [{}]: repo UUID [{}], cluster UUID [{}]", + repositoryName, + repositoryData.getGenId(), + repositoryData.getUuid(), + repositoryData.getClusterUUID() + ); + listener.onResponse(new RepositoryVerifyIntegrityResponse(getStatus(), finalRepositoryData.getGenId())); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> Strings.format( + "[%s] failed verifying metadata integrity for index generation 
[%d]: repo UUID [%s], cluster UUID [%s]", + repositoryName, + repositoryData.getGenId(), + repositoryData.getUuid(), + repositoryData.getClusterUUID() + ), + e + ); + listener.onFailure(e); + } + }); + } + + private void ensureNotCancelled() { + if (isCancelledSupplier.getAsBoolean()) { + throw new TaskCancelledException("task cancelled"); + } + } + + private void verifySnapshots(ActionListener listener) { + new SnapshotsVerifier().run(listener); + } + + /** + * Verifies the top-level snapshot metadata in the repo, including {@link SnapshotInfo} and optional {@link Metadata} blobs. + */ + private class SnapshotsVerifier { + final Map> indexNamesBySnapshotName; + + SnapshotsVerifier() { + indexNamesBySnapshotName = Maps.newHashMapWithExpectedSize(repositoryData.getIndices().size()); + for (final var indexId : repositoryData.getIndices().values()) { + for (final var snapshotId : repositoryData.getSnapshots(indexId)) { + indexNamesBySnapshotName.computeIfAbsent(snapshotId.getName(), ignored -> new HashSet<>()).add(indexId.getName()); + } + } + } + + void run(ActionListener listener) { + var listeners = new RefCountingListener(listener); + runThrottled( + Iterators.failFast( + repositoryData.getSnapshotIds().iterator(), + () -> isCancelledSupplier.getAsBoolean() || listeners.isFailing() + ), + (releasable, snapshotId) -> new SnapshotVerifier(snapshotId).run( + ActionListener.assertOnce(ActionListener.releaseAfter(listeners.acquire(), releasable)) + ), + requestParams.snapshotVerificationConcurrency(), + snapshotProgress, + listeners + ); + } + + /** + * Verifies a single snapshot's metadata, including its {@link SnapshotInfo} and optional {@link Metadata} blobs. 
+ */ + private class SnapshotVerifier { + private final SnapshotId snapshotId; + + SnapshotVerifier(SnapshotId snapshotId) { + this.snapshotId = snapshotId; + } + + void run(ActionListener listener) { + if (isCancelledSupplier.getAsBoolean()) { + // getSnapshotInfo does its own forking, so we must check for cancellation here + listener.onResponse(null); + return; + } + + blobStoreRepository.getSnapshotInfo(snapshotId, new ActionListener<>() { + @Override + public void onResponse(SnapshotInfo snapshotInfo) { + verifySnapshotInfo(snapshotInfo, listener); + } + + @Override + public void onFailure(Exception e) { + unreadableSnapshotInfoUuids.add(snapshotId.getUUID()); + anomaly("failed to load snapshot info").snapshotId(snapshotId).exception(e).write(listener); + } + }); + } + + void verifySnapshotInfo(SnapshotInfo snapshotInfo, ActionListener listener) { + final var chunkBuilder = new RepositoryVerifyIntegrityResponseChunk.Builder( + responseChunkWriter, + RepositoryVerifyIntegrityResponseChunk.Type.SNAPSHOT_INFO, + currentTimeMillisSupplier.getAsLong() + ).snapshotInfo(snapshotInfo); + + // record the SnapshotInfo in the response + final var chunkWrittenStep = SubscribableListener.newForked(chunkBuilder::write); + + if (failedShardSnapshotsCount.get() < requestParams.maxFailedShardSnapshots()) { + for (final var shardFailure : snapshotInfo.shardFailures()) { + if (failedShardSnapshotsCount.getAndIncrement() < requestParams.maxFailedShardSnapshots()) { + failedShardSnapshotDescriptions.add( + getShardSnapshotDescription(snapshotId, shardFailure.index(), shardFailure.shardId()) + ); + } + } + } else { + failedShardSnapshotsCount.addAndGet(snapshotInfo.shardFailures().size()); + } + + // check the indices in the SnapshotInfo match those in RepositoryData + final var snapshotContentsOkStep = chunkWrittenStep.andThen(l -> { + if (Set.copyOf(snapshotInfo.indices()).equals(indexNamesBySnapshotName.get(snapshotId.getName()))) { + l.onResponse(null); + } else { + 
anomaly("snapshot contents mismatch").snapshotId(snapshotId).write(l); + } + }); + + // check the global metadata is readable if present + final var globalMetadataOkStep = Boolean.TRUE.equals(snapshotInfo.includeGlobalState()) + ? snapshotContentsOkStep.andThen(this::verifySnapshotGlobalMetadata) + : snapshotContentsOkStep; + + globalMetadataOkStep.addListener(listener); + } + + private void verifySnapshotGlobalMetadata(ActionListener listener) { + metadataTaskRunner.run(ActionRunnable.wrap(listener, l -> { + try { + blobStoreRepository.getSnapshotGlobalMetadata(snapshotId); + // no checks here, loading it is enough + l.onResponse(null); + } catch (Exception e) { + anomaly("failed to load global metadata").snapshotId(snapshotId).exception(e).write(l); + } + })); + } + } + } + + private void checkFailedShardSnapshotCount(ActionListener listener) { + if (failedShardSnapshotDescriptions.size() < failedShardSnapshotsCount.get()) { + listener.onFailure( + new RepositoryVerificationException( + repositoryName, + Strings.format( + """ + Cannot verify the integrity of all index snapshots because this repository contains too many shard snapshot \ + failures: there are [%d] shard snapshot failures but [?%s] is set to [%d]. 
\ + Please increase this limit if it is safe to do so.""", + failedShardSnapshotsCount.get(), + RepositoryVerifyIntegrityParams.MAX_FAILED_SHARD_SNAPSHOTS, + requestParams.maxFailedShardSnapshots() + ) + ) + ); + } else { + listener.onResponse(null); + } + } + + private void verifyIndices(ActionListener listener) { + var listeners = new RefCountingListener(listener); + runThrottled( + Iterators.failFast( + repositoryData.getIndices().values().iterator(), + () -> isCancelledSupplier.getAsBoolean() || listeners.isFailing() + ), + (releasable, indexId) -> new IndexVerifier(indexId).run(ActionListener.releaseAfter(listeners.acquire(), releasable)), + requestParams.indexVerificationConcurrency(), + indexProgress, + listeners + ); + } + + /** + * Verifies the integrity of the snapshots of a specific index + */ + private class IndexVerifier { + private final IndexId indexId; + private final ShardContainerContentsDeduplicator shardContainerContentsDeduplicator = new ShardContainerContentsDeduplicator(); + private final IndexDescriptionsDeduplicator indexDescriptionsDeduplicator = new IndexDescriptionsDeduplicator(); + private final AtomicInteger totalSnapshotCounter = new AtomicInteger(); + private final AtomicInteger restorableSnapshotCounter = new AtomicInteger(); + + IndexVerifier(IndexId indexId) { + this.indexId = indexId; + } + + void run(ActionListener listener) { + SubscribableListener + + .newForked(l -> { + var listeners = new RefCountingListener(1, l); + runThrottled( + Iterators.failFast( + repositoryData.getSnapshots(indexId).iterator(), + () -> isCancelledSupplier.getAsBoolean() || listeners.isFailing() + ), + (releasable, snapshotId) -> verifyIndexSnapshot( + snapshotId, + ActionListener.releaseAfter(listeners.acquire(), releasable) + ), + requestParams.indexSnapshotVerificationConcurrency(), + indexSnapshotProgress, + listeners + ); + }) + .andThen(l -> { + ensureNotCancelled(); + new RepositoryVerifyIntegrityResponseChunk.Builder( + responseChunkWriter, + 
RepositoryVerifyIntegrityResponseChunk.Type.INDEX_RESTORABILITY, + currentTimeMillisSupplier.getAsLong() + ).indexRestorability(indexId, totalSnapshotCounter.get(), restorableSnapshotCounter.get()).write(l); + }) + .addListener(listener); + } + + private void verifyIndexSnapshot(SnapshotId snapshotId, ActionListener listener) { + totalSnapshotCounter.incrementAndGet(); + indexDescriptionsDeduplicator.get(snapshotId).andThen((l, indexDescription) -> { + if (indexDescription == null) { + // index metadata was unreadable; anomaly already reported, skip further verification of this index snapshot + l.onResponse(null); + } else { + new ShardSnapshotsVerifier(snapshotId, indexDescription).run(l); + } + }).addListener(listener); + } + + /** + * Information about the contents of the {@code ${REPO}/indices/${INDEX}/${SHARD}/} container, shared across the verifications of + * each snapshot of this shard. + * + * @param shardId the numeric shard ID. + * @param blobsByName the {@link BlobMetadata} for every blob in the container, keyed by blob name. + * @param shardGeneration the current {@link ShardGeneration} for this shard, identifying the current {@code index-${UUID}} blob. + * @param filesByPhysicalNameBySnapshotName a {@link BlobStoreIndexShardSnapshot.FileInfo} for every tracked file, keyed by snapshot + * name and then by the file's physical name. + * @param blobContentsListeners a threadsafe mutable map, keyed by file name, for every tracked file that the verification process + * encounters. Used to avoid double-counting the size of any files, and also to deduplicate work to + * verify their contents if {@code ?verify_blob_contents} is set. 
+ */ + private record ShardContainerContents( + int shardId, + Map blobsByName, + @Nullable /* if shard gen is not defined */ + ShardGeneration shardGeneration, + @Nullable /* if shard gen blob could not be read */ + Map> filesByPhysicalNameBySnapshotName, + Map> blobContentsListeners + ) {} + + /** + * Verifies the integrity of the shard snapshots of a specific index snapshot + */ + private class ShardSnapshotsVerifier { + private final SnapshotId snapshotId; + private final IndexDescription indexDescription; + private final AtomicInteger restorableShardCount = new AtomicInteger(); + + ShardSnapshotsVerifier(SnapshotId snapshotId, IndexDescription indexDescription) { + this.snapshotId = snapshotId; + this.indexDescription = indexDescription; + } + + void run(ActionListener listener) { + try (var listeners = new RefCountingListener(1, listener.map(v -> { + if (unreadableSnapshotInfoUuids.contains(snapshotId.getUUID()) == false + && indexDescription.shardCount() == restorableShardCount.get()) { + restorableSnapshotCounter.incrementAndGet(); + } + return v; + }))) { + for (int shardId = 0; shardId < indexDescription.shardCount(); shardId++) { + if (failedShardSnapshotDescriptions.contains(getShardSnapshotDescription(snapshotId, indexId.getName(), shardId))) { + continue; + } + + shardContainerContentsDeduplicator.get(shardId) + // deduplicating reads of shard container contents + .andThen((l, shardContainerContents) -> { + if (shardContainerContents == null) { + // shard container contents was unreadable; anomaly already reported, skip further verification + l.onResponse(null); + } else { + new ShardSnapshotVerifier(shardContainerContents).run(l); + } + }) + .addListener(listeners.acquire()); + } + } + } + + /** + * Verifies the integrity of a specific shard snapshot + */ + private class ShardSnapshotVerifier { + private final ShardContainerContents shardContainerContents; + private volatile boolean isRestorable = true; + + 
ShardSnapshotVerifier(ShardContainerContents shardContainerContents) { + this.shardContainerContents = shardContainerContents; + } + + void run(ActionListener listener) { + metadataTaskRunner.run(ActionRunnable.wrap(listener, this::verifyShardSnapshot)); + } + + private void verifyShardSnapshot(ActionListener listener) { + final var shardId = shardContainerContents.shardId(); + final BlobStoreIndexShardSnapshot blobStoreIndexShardSnapshot; + try { + blobStoreIndexShardSnapshot = blobStoreRepository.loadShardSnapshot( + blobStoreRepository.shardContainer(indexId, shardId), + snapshotId + ); + } catch (Exception e) { + anomaly("failed to load shard snapshot").snapshotId(snapshotId) + .shardDescription(indexDescription, shardId) + .exception(e) + .write(listener); + return; + } + + final var listeners = new RefCountingListener(1, listener.map(v -> { + if (isRestorable) { + restorableShardCount.incrementAndGet(); + } + return v; + })); + final var shardGenerationConsistencyListener = listeners.acquire(); + + runThrottled( + Iterators.failFast( + blobStoreIndexShardSnapshot.indexFiles().iterator(), + () -> isCancelledSupplier.getAsBoolean() || listeners.isFailing() + ), + (releasable, fileInfo) -> verifyFileInfo(fileInfo, ActionListener.releaseAfter(listeners.acquire(), releasable)), + 1, + blobsVerified, + listeners + ); + + // NB this next step doesn't matter for restorability, it is just verifying that the shard gen blob matches the shard + // snapshot blob + verifyShardGenerationConsistency(blobStoreIndexShardSnapshot, shardGenerationConsistencyListener); + } + + /** + * Checks that the given {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo} matches + * the actual blob in the repository. 
+ */ + private void verifyFileInfo(BlobStoreIndexShardSnapshot.FileInfo fileInfo, ActionListener listener) { + if (fileInfo.metadata().hashEqualsContents()) { + listener.onResponse(null); + return; + } + + for (int partIndex = 0; partIndex < fileInfo.numberOfParts(); partIndex++) { + final var blobName = fileInfo.partName(partIndex); + final var blobInfo = shardContainerContents.blobsByName().get(blobName); + if (blobInfo == null) { + isRestorable = false; + String physicalFileName = fileInfo.physicalName(); + anomaly("missing blob").snapshotId(snapshotId) + .shardDescription(indexDescription, shardContainerContents.shardId()) + .blobName(blobName, physicalFileName) + .part(partIndex, fileInfo.numberOfParts()) + .fileLength(ByteSizeValue.ofBytes(fileInfo.length())) + .partLength(ByteSizeValue.ofBytes(fileInfo.partBytes(partIndex))) + .write(listener); + return; + } else if (blobInfo.length() != fileInfo.partBytes(partIndex)) { + isRestorable = false; + String physicalFileName = fileInfo.physicalName(); + ByteSizeValue blobLength = ByteSizeValue.ofBytes(blobInfo.length()); + anomaly("mismatched blob length").snapshotId(snapshotId) + .shardDescription(indexDescription, shardContainerContents.shardId()) + .blobName(blobName, physicalFileName) + .part(partIndex, fileInfo.numberOfParts()) + .fileLength(ByteSizeValue.ofBytes(fileInfo.length())) + .partLength(ByteSizeValue.ofBytes(fileInfo.partBytes(partIndex))) + .blobLength(blobLength) + .write(listener); + return; + } + } + + // NB adding a listener whether ?verify_blob_contents is set or not - we want to track the blob sizes either way + blobContentsListeners(indexDescription, shardContainerContents, fileInfo).addListener( + listener.delegateResponse((l, e) -> { + isRestorable = false; + String physicalFileName = fileInfo.physicalName(); + anomaly("corrupt data blob").snapshotId(snapshotId) + .shardDescription(indexDescription, shardContainerContents.shardId()) + .blobName(fileInfo.name(), physicalFileName) + 
.part(-1, fileInfo.numberOfParts()) + .fileLength(ByteSizeValue.ofBytes(fileInfo.length())) + .exception(e) + .write(l); + }) + ); + } + + /** + * Checks that the shard generation blob has the right content for this shard snapshot. + */ + private void verifyShardGenerationConsistency( + BlobStoreIndexShardSnapshot blobStoreIndexShardSnapshot, + ActionListener listener + ) { + final var summaryFilesByPhysicalNameBySnapshotName = shardContainerContents.filesByPhysicalNameBySnapshotName(); + if (summaryFilesByPhysicalNameBySnapshotName == null) { + // couldn't read shard gen blob at all - already reported, nothing more to do here + listener.onResponse(null); + return; + } + + final var shardId = shardContainerContents.shardId(); + + final var summaryFilesByPhysicalName = summaryFilesByPhysicalNameBySnapshotName.get(snapshotId.getName()); + if (summaryFilesByPhysicalName == null) { + anomaly("snapshot not in shard generation").snapshotId(snapshotId) + .shardDescription(indexDescription, shardId) + .shardGeneration(shardContainerContents.shardGeneration()) + .write(listener); + return; + } + + final var snapshotFiles = getFilesByPhysicalName(blobStoreIndexShardSnapshot.indexFiles()); + + for (final var summaryFile : summaryFilesByPhysicalName.values()) { + final var snapshotFile = snapshotFiles.get(summaryFile.physicalName()); + if (snapshotFile == null) { + anomaly("blob in shard generation but not snapshot").snapshotId(snapshotId) + .shardDescription(indexDescription, shardId) + .shardGeneration(shardContainerContents.shardGeneration()) + .physicalFileName(summaryFile.physicalName()) + .write(listener); + return; + } else if (summaryFile.isSame(snapshotFile) == false) { + anomaly("snapshot shard generation mismatch").snapshotId(snapshotId) + .shardDescription(indexDescription, shardId) + .shardGeneration(shardContainerContents.shardGeneration()) + .physicalFileName(summaryFile.physicalName()) + .write(listener); + return; + } + } + + for (final var snapshotFile : 
blobStoreIndexShardSnapshot.indexFiles()) { + if (summaryFilesByPhysicalName.get(snapshotFile.physicalName()) == null) { + anomaly("blob in snapshot but not shard generation").snapshotId(snapshotId) + .shardDescription(indexDescription, shardId) + .shardGeneration(shardContainerContents.shardGeneration()) + .physicalFileName(snapshotFile.physicalName()) + .write(listener); + return; + } + } + + listener.onResponse(null); + } + } + } + + /** + * Exposes {@link IndexDescription} per index-metadata-blob (particularly the shard count), caching the value on first read + * to avoid duplicate work. + */ + private class IndexDescriptionsDeduplicator { + private final Map> listenersByBlobId = newConcurrentMap(); + + SubscribableListener get(SnapshotId snapshotId) { + final var indexMetaBlobId = repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotId, indexId); + return listenersByBlobId.computeIfAbsent( + indexMetaBlobId, + ignored -> SubscribableListener.newForked( + indexDescriptionListener -> metadataTaskRunner.run( + ActionRunnable.wrap(indexDescriptionListener, l -> load(snapshotId, indexMetaBlobId, l)) + ) + ) + ); + } + + private void load(SnapshotId snapshotId, String indexMetaBlobId, ActionListener listener) { + try { + listener.onResponse( + new IndexDescription( + indexId, + indexMetaBlobId, + blobStoreRepository.getSnapshotIndexMetaData(repositoryData, snapshotId, indexId).getNumberOfShards() + ) + ); + } catch (Exception e) { + anomaly("failed to load index metadata").indexDescription(new IndexDescription(indexId, indexMetaBlobId, 0)) + .exception(e) + .write(listener.map(v -> null)); + } + } + } + + /** + * Exposes {@link ShardContainerContents} per shard, caching the value on the first read to avoid duplicate work. 
+ */ + private class ShardContainerContentsDeduplicator { + private final Map> listenersByShardId = newConcurrentMap(); + + SubscribableListener get(int shardId) { + return listenersByShardId.computeIfAbsent( + shardId, + ignored -> SubscribableListener.newForked( + shardContainerContentsListener -> metadataTaskRunner.run( + ActionRunnable.wrap(shardContainerContentsListener, l -> load(shardId, l)) + ) + ) + ); + } + + private void load(int shardId, ActionListener listener) { + final var indexDescription = new IndexDescription(indexId, null, 0); + + final Map blobsByName; + try { + blobsByName = blobStoreRepository.shardContainer(indexId, shardId).listBlobs(OperationPurpose.REPOSITORY_ANALYSIS); + } catch (Exception e) { + anomaly("failed to list shard container contents").shardDescription(new IndexDescription(indexId, null, 0), shardId) + .exception(e) + .write(listener.map(v -> null)); + return; + } + + final var shardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId); + if (shardGen == null) { + anomaly("shard generation not defined").shardDescription(indexDescription, shardId) + .write( + listener.map( + // NB we don't need the shard gen to do most of the rest of the verification, so we set it to null and + // carry on: + v -> new ShardContainerContents(shardId, blobsByName, null, null, ConcurrentCollections.newConcurrentMap()) + ) + ); + return; + } + + SubscribableListener + // try and load the shard gen blob + .newForked(l -> { + try { + l.onResponse(blobStoreRepository.getBlobStoreIndexShardSnapshots(indexId, shardId, shardGen)); + } catch (Exception e) { + // failing here is not fatal to snapshot restores, only to creating/deleting snapshots, so we can return null + // and carry on with the analysis + anomaly("failed to load shard generation").shardDescription(indexDescription, shardId) + .shardGeneration(shardGen) + .exception(e) + .write(l.map(v -> null)); + } + }) + .andThenApply( + blobStoreIndexShardSnapshots -> new 
ShardContainerContents( + shardId, + blobsByName, + shardGen, + getFilesByPhysicalNameBySnapshotName(blobStoreIndexShardSnapshots), + ConcurrentCollections.newConcurrentMap() + ) + ) + .addListener(listener); + } + + private static Map> getFilesByPhysicalNameBySnapshotName( + BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots + ) { + if (blobStoreIndexShardSnapshots == null) { + return null; + } + + final Map> filesByPhysicalNameBySnapshotName = Maps + .newHashMapWithExpectedSize(blobStoreIndexShardSnapshots.snapshots().size()); + for (final var snapshotFiles : blobStoreIndexShardSnapshots.snapshots()) { + filesByPhysicalNameBySnapshotName.put(snapshotFiles.snapshot(), getFilesByPhysicalName(snapshotFiles.indexFiles())); + } + return filesByPhysicalNameBySnapshotName; + } + } + + private SubscribableListener blobContentsListeners( + IndexDescription indexDescription, + ShardContainerContents shardContainerContents, + BlobStoreIndexShardSnapshot.FileInfo fileInfo + ) { + return shardContainerContents.blobContentsListeners().computeIfAbsent(fileInfo.name(), ignored -> { + if (requestParams.verifyBlobContents()) { + return SubscribableListener.newForked(listener -> snapshotTaskRunner.run(ActionRunnable.run(listener, () -> { + try (var slicedStream = new SlicedInputStream(fileInfo.numberOfParts()) { + @Override + protected InputStream openSlice(int slice) throws IOException { + return blobStoreRepository.shardContainer(indexDescription.indexId(), shardContainerContents.shardId()) + .readBlob(OperationPurpose.REPOSITORY_ANALYSIS, fileInfo.partName(slice)); + } + }; + var rateLimitedStream = new RateLimitingInputStream(slicedStream, () -> rateLimiter, throttledNanos::addAndGet); + var indexInput = new IndexInputWrapper(rateLimitedStream, fileInfo.length()) + ) { + CodecUtil.checksumEntireFile(indexInput); + } + }))); + } else { + blobBytesVerified.addAndGet(fileInfo.length()); + return SubscribableListener.newSucceeded(null); + } + }); + } + } + + private static 
String getShardSnapshotDescription(SnapshotId snapshotId, String index, int shardId) { + return snapshotId.getUUID() + "/" + index + "/" + shardId; + } + + private static Map getFilesByPhysicalName( + List fileInfos + ) { + final Map filesByPhysicalName = Maps.newHashMapWithExpectedSize(fileInfos.size()); + for (final var fileInfo : fileInfos) { + filesByPhysicalName.put(fileInfo.physicalName(), fileInfo); + } + return filesByPhysicalName; + } + + private static void runThrottled( + Iterator iterator, + BiConsumer itemConsumer, + int maxConcurrency, + AtomicLong progressCounter, + Releasable onCompletion + ) { + ThrottledIterator.run(iterator, itemConsumer, maxConcurrency, progressCounter::incrementAndGet, onCompletion::close); + } + + private RepositoryVerifyIntegrityResponseChunk.Builder anomaly(String anomaly) { + return new RepositoryVerifyIntegrityResponseChunk.Builder( + responseChunkWriter, + RepositoryVerifyIntegrityResponseChunk.Type.ANOMALY, + currentTimeMillisSupplier.getAsLong() + ).anomaly(anomaly); + } + + public long getSnapshotCount() { + return snapshotCount; + } + + public long getIndexCount() { + return indexCount; + } + + public long getIndexSnapshotCount() { + return indexSnapshotCount; + } + + private class IndexInputWrapper extends IndexInput { + private final InputStream inputStream; + private final long length; + long filePointer = 0L; + + IndexInputWrapper(InputStream inputStream, long length) { + super(""); + this.inputStream = inputStream; + this.length = length; + } + + @Override + public byte readByte() throws IOException { + if (isCancelledSupplier.getAsBoolean()) { + throw new TaskCancelledException("task cancelled"); + } + final var read = inputStream.read(); + if (read == -1) { + throw new EOFException(); + } + filePointer += 1; + blobBytesVerified.incrementAndGet(); + return (byte) read; + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + while (len > 0) { + if 
(isCancelledSupplier.getAsBoolean()) { + throw new TaskCancelledException("task cancelled"); + } + final var read = inputStream.read(b, offset, len); + if (read == -1) { + throw new EOFException(); + } + filePointer += read; + blobBytesVerified.addAndGet(read); + len -= read; + offset += read; + } + } + + @Override + public void close() {} + + @Override + public long getFilePointer() { + return filePointer; + } + + @Override + public void seek(long pos) { + if (filePointer != pos) { + assert false : "cannot seek"; + throw new UnsupportedOperationException("seek"); + } + } + + @Override + public long length() { + return length; + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) { + assert false; + throw new UnsupportedOperationException("slice"); + } + } + + private static class CancellableRunner { + private final ThrottledTaskRunner delegate; + private final CancellableThreads cancellableThreads; + + CancellableRunner(ThrottledTaskRunner delegate, CancellableThreads cancellableThreads) { + this.delegate = delegate; + this.cancellableThreads = cancellableThreads; + } + + void run(AbstractRunnable runnable) { + delegate.enqueueTask(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + try (releasable) { + if (cancellableThreads.isCancelled()) { + runnable.onFailure(new TaskCancelledException("task cancelled")); + } else { + cancellableThreads.execute(runnable::run); + } + } + } + + @Override + public void onFailure(Exception e) { + runnable.onFailure(e); + } + }); + } + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityParams.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityParams.java new file mode 100644 index 0000000000000..61a58c0da8df0 --- /dev/null +++ 
b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityParams.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Objects; + +/** + * Parameters of a repository-verity-integrity request. + * + * @param repository the name of the repository whose integrity to verify. + * @param metaThreadPoolConcurrency the number of concurrent tasks to execute on the {@link ThreadPool.Names#SNAPSHOT_META} pool, or + * {@code 0} to use a sensible default. + * @param blobThreadPoolConcurrency the number of concurrent tasks to execute on the {@link ThreadPool.Names#SNAPSHOT} pool, or {@code 0} + * to use a sensible default. + * @param snapshotVerificationConcurrency the number of snapshots to verify concurrently, or {@code 0} to use a sensible default. + * @param indexVerificationConcurrency the number of indices to verify concurrently, or {@code 0} to use a sensible default. + * @param indexSnapshotVerificationConcurrency the number of snapshots to verify concurrently for each index, or {@code 0} to use a sensible + * default. 
+ * @param maxFailedShardSnapshots the maximum number of shard snapshots failures to track - we must build a list of all of them in memory + * to avoid reporting spurious anomalies, and this can be overwhelming in a very broken repository. + * @param verifyBlobContents whether to verify the contents of each data blob (which is very expensive). + * @param maxBytesPerSec rate limit to use for blob contents verification. + */ +public record RepositoryVerifyIntegrityParams( + String repository, + int metaThreadPoolConcurrency, + int blobThreadPoolConcurrency, + int snapshotVerificationConcurrency, + int indexVerificationConcurrency, + int indexSnapshotVerificationConcurrency, + int maxFailedShardSnapshots, + boolean verifyBlobContents, + ByteSizeValue maxBytesPerSec +) implements Writeable { + + public static final String MAX_FAILED_SHARD_SNAPSHOTS = "max_failed_shard_snapshots"; + + public RepositoryVerifyIntegrityParams { + Objects.requireNonNull(repository, "repository"); + requireNonNegative("meta_thread_pool_concurrency", metaThreadPoolConcurrency); + requireNonNegative("blob_thread_pool_concurrency", blobThreadPoolConcurrency); + requireNonNegative("snapshot_verification_concurrency", snapshotVerificationConcurrency); + requireNonNegative("index_verification_concurrency", indexVerificationConcurrency); + requireNonNegative("index_snapshot_verification_concurrency", indexSnapshotVerificationConcurrency); + requireNonNegative(MAX_FAILED_SHARD_SNAPSHOTS, maxFailedShardSnapshots); + if (maxBytesPerSec.getBytes() < 1) { + throw new IllegalArgumentException("invalid rate limit"); + } + } + + private static void requireNonNegative(String name, int value) { + if (value < 0) { + throw new IllegalArgumentException("argument [" + name + "] must be at least [0]"); + } + } + + RepositoryVerifyIntegrityParams(RestRequest restRequest) { + this( + restRequest.param("repository"), + restRequest.paramAsInt("meta_thread_pool_concurrency", 0), + 
restRequest.paramAsInt("blob_thread_pool_concurrency", 0), + restRequest.paramAsInt("snapshot_verification_concurrency", 0), + restRequest.paramAsInt("index_verification_concurrency", 0), + restRequest.paramAsInt("index_snapshot_verification_concurrency", 0), + restRequest.paramAsInt(MAX_FAILED_SHARD_SNAPSHOTS, 0), + restRequest.paramAsBoolean("verify_blob_contents", false), + restRequest.paramAsSize("max_bytes_per_sec", ByteSizeValue.ofMb(40)) + ); + } + + RepositoryVerifyIntegrityParams(StreamInput in) throws IOException { + this( + in.readString(), + in.readVInt(), + in.readVInt(), + in.readVInt(), + in.readVInt(), + in.readVInt(), + in.readVInt(), + in.readBoolean(), + ByteSizeValue.readFrom(in) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(repository); + out.writeVInt(metaThreadPoolConcurrency); + out.writeVInt(blobThreadPoolConcurrency); + out.writeVInt(snapshotVerificationConcurrency); + out.writeVInt(indexVerificationConcurrency); + out.writeVInt(indexSnapshotVerificationConcurrency); + out.writeVInt(maxFailedShardSnapshots); + out.writeBoolean(verifyBlobContents); + maxBytesPerSec.writeTo(out); + } + + public RepositoryVerifyIntegrityParams withResolvedDefaults(ThreadPool.Info metadataThreadPoolInfo) { + if (metaThreadPoolConcurrency > 0 + && blobThreadPoolConcurrency > 0 + && snapshotVerificationConcurrency > 0 + && indexVerificationConcurrency > 0 + && indexSnapshotVerificationConcurrency > 0 + && maxFailedShardSnapshots > 0) { + return this; + } + + final var maxThreads = Math.max(1, metadataThreadPoolInfo.getMax()); + final var halfMaxThreads = Math.max(1, maxThreads / 2); + return new RepositoryVerifyIntegrityParams( + repository, + metaThreadPoolConcurrency > 0 ? metaThreadPoolConcurrency : halfMaxThreads, + blobThreadPoolConcurrency > 0 ? blobThreadPoolConcurrency : 1, + snapshotVerificationConcurrency > 0 ? snapshotVerificationConcurrency : halfMaxThreads, + indexVerificationConcurrency > 0 ? 
indexVerificationConcurrency : maxThreads, + indexSnapshotVerificationConcurrency > 0 ? indexSnapshotVerificationConcurrency : 1, + maxFailedShardSnapshots > 0 ? maxFailedShardSnapshots : 10000, + verifyBlobContents, + maxBytesPerSec + ); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponse.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponse.java new file mode 100644 index 0000000000000..eff6ed7eb465d --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponse.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class RepositoryVerifyIntegrityResponse extends ActionResponse { + private final RepositoryVerifyIntegrityTask.Status finalTaskStatus; + private final long finalRepositoryGeneration; + + RepositoryVerifyIntegrityResponse(RepositoryVerifyIntegrityTask.Status finalTaskStatus, long finalRepositoryGeneration) { + this.finalTaskStatus = finalTaskStatus; + this.finalRepositoryGeneration = finalRepositoryGeneration; + } + + RepositoryVerifyIntegrityResponse(StreamInput in) throws IOException { + finalRepositoryGeneration = in.readLong(); + finalTaskStatus = new RepositoryVerifyIntegrityTask.Status(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(finalRepositoryGeneration); + finalTaskStatus.writeTo(out); + } + + public long finalRepositoryGeneration() { + return finalRepositoryGeneration; + } + + public RepositoryVerifyIntegrityTask.Status finalTaskStatus() { + return finalTaskStatus; + } + + public long originalRepositoryGeneration() { + return finalTaskStatus.repositoryGeneration(); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponseChunk.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponseChunk.java new file mode 100644 index 0000000000000..90130811c1218 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponseChunk.java @@ -0,0 +1,355 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.ShardGeneration; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A chunk of response to be streamed to the waiting client. + * + * @param type indicates the type of this chunk. + * @param anomaly a textual description of the anomaly found, or {@code null} if this chunk does not describe an anomaly. + * @param snapshotId the ID of the snapshot to which this chunk pertains, or {@code null} if this chunk does not pertain to a particular + * snapshot. + * @param snapshotInfo the raw {@link SnapshotInfo} for the snapshot, or {@code null}. + * @param indexDescription information about the index to which this chunk pertains, or {@code null} if this chunk does not pertain to + * a particular index. + * @param shardId the ID of the shard to which this chunk pertains, or {@code -1} if this chunk does not pertain to a particular shard. + * @param shardGeneration the {@link ShardGeneration} for the given shard, or {@code null} if not relevant. 
+ * @param blobName the name of the blob to which this chunk pertains, or {@code null} if this chunk does not pertain to a particular blob. + * @param physicalFileName the name of the Lucene file to which this chunk pertains, or {@code null} if this chunk does not pertain to a + * particular Lucene file. + * @param partIndex the index of the part of the file represented by the blob to which this chunk pertains, or {@code -1} if this chunk does + * not pertain to a particular part. + * @param partCount the number of parts into which the file to which this chunk pertains is divided, or {@code -1} if not applicable. + * @param fileLength the length of the Lucene file to which this chunk pertains, or {@link ByteSizeValue#MINUS_ONE} if not applicable. + * @param partLength the length of the file part to which this chunk pertains, or {@link ByteSizeValue#MINUS_ONE} if not applicable. + * @param blobLength the length of the blob to which this chunk pertains, or {@link ByteSizeValue#MINUS_ONE} if not applicable. + * @param totalSnapshotCount the total number of snapshots which involve the index to which this chunk pertains, or {@code -1} if not + * applicable. + * @param restorableSnapshotCount the number of restorable snapshots which involve the index to which this chunk pertains, or {@code -1} if + * not applicable. + * @param exception an exception which relates to the failure described by this chunk, or {@code null} if not applicable. 
+ */ +public record RepositoryVerifyIntegrityResponseChunk( + long timestampMillis, + Type type, + @Nullable String anomaly, + @Nullable SnapshotId snapshotId, + @Nullable SnapshotInfo snapshotInfo, + @Nullable IndexDescription indexDescription, + int shardId, + @Nullable ShardGeneration shardGeneration, + @Nullable String blobName, + @Nullable String physicalFileName, + int partIndex, + int partCount, + ByteSizeValue fileLength, + ByteSizeValue partLength, + ByteSizeValue blobLength, + int totalSnapshotCount, + int restorableSnapshotCount, + @Nullable Exception exception +) implements Writeable, ToXContentFragment { + + public enum Type { + /** + * The first chunk sent. Used to indicate that the verification has successfully started, and therefore we should start to send a + * 200 OK response to the client. + */ + START_RESPONSE, + + /** + * This chunk contains the raw {@link SnapshotInfo} for a snapshot. + */ + SNAPSHOT_INFO, + + /** + * This chunk contains information about the restorability of an index. + */ + INDEX_RESTORABILITY, + + /** + * This chunk describes an anomaly found during verification. 
+ */ + ANOMALY, + } + + public RepositoryVerifyIntegrityResponseChunk { + if (fileLength == null + || partLength == null + || blobLength == null + || shardId < -1 + || partIndex < -1 + || partCount < -1 + || totalSnapshotCount < -1 + || restorableSnapshotCount < -1 + || (totalSnapshotCount >= 0 != restorableSnapshotCount >= 0)) { + throw new IllegalArgumentException("invalid: " + this); + } + } + + public RepositoryVerifyIntegrityResponseChunk(StreamInput in) throws IOException { + this( + in.readVLong(), + // TODO enum serialization tests + in.readEnum(Type.class), + in.readOptionalString(), + in.readOptionalWriteable(SnapshotId::new), + in.readOptionalWriteable(SnapshotInfo::readFrom), + in.readOptionalWriteable(IndexDescription::new), + in.readInt(), + in.readOptionalWriteable(ShardGeneration::new), + in.readOptionalString(), + in.readOptionalString(), + in.readInt(), + in.readInt(), + ByteSizeValue.readFrom(in), + ByteSizeValue.readFrom(in), + ByteSizeValue.readFrom(in), + in.readInt(), + in.readInt(), + in.readOptional(StreamInput::readException) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(timestampMillis); + out.writeEnum(type); + out.writeOptionalString(anomaly); + out.writeOptionalWriteable(snapshotId); + out.writeOptionalWriteable(snapshotInfo); + out.writeOptionalWriteable(indexDescription); + out.writeInt(shardId); + out.writeOptionalWriteable(shardGeneration); + out.writeOptionalString(blobName); + out.writeOptionalString(physicalFileName); + out.writeInt(partIndex); + out.writeInt(partCount); + fileLength.writeTo(out); + partLength.writeTo(out); + blobLength.writeTo(out); + out.writeInt(totalSnapshotCount); + out.writeInt(restorableSnapshotCount); + out.writeOptional(StreamOutput::writeException, exception); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.timeField("timestamp_in_millis", "timestamp", timestampMillis); + 
+ if (anomaly() != null) { + builder.field("anomaly", anomaly()); + } + + if (snapshotInfo() != null) { + builder.field("snapshot"); + snapshotInfo().toXContentExternal(builder, params); + } else if (snapshotId() != null) { + builder.startObject("snapshot"); + builder.field("snapshot", snapshotId().getName()); + builder.field("uuid", snapshotId().getUUID()); + builder.endObject(); + } + + if (indexDescription() != null) { + builder.field("index", indexDescription(), params); + } + if (shardId() >= 0) { + builder.field("shard_id", shardId()); + } + if (shardGeneration() != null) { + builder.field("shard_generation", shardGeneration(), params); + } + if (blobName() != null) { + builder.field("blob_name", blobName()); + } + if (physicalFileName() != null) { + builder.field("physical_file_name", physicalFileName()); + } + if (partIndex() >= 0) { + builder.field("part_index", partIndex()); + } + if (partCount() >= 0) { + builder.field("part_count", partCount()); + } + if (fileLength() != ByteSizeValue.MINUS_ONE) { + builder.humanReadableField("file_length_in_bytes", "file_length", fileLength()); + } + if (partLength() != ByteSizeValue.MINUS_ONE) { + builder.humanReadableField("part_length_in_bytes", "part_length", partLength()); + } + if (blobLength() != ByteSizeValue.MINUS_ONE) { + builder.humanReadableField("blob_length_in_bytes", "blob_length", blobLength()); + } + if (totalSnapshotCount() >= 0 && restorableSnapshotCount() >= 0) { + builder.startObject("snapshot_restorability"); + builder.field("total_snapshot_count", totalSnapshotCount()); + builder.field("restorable_snapshot_count", restorableSnapshotCount()); + builder.endObject(); + } + if (exception() != null) { + builder.startObject("exception") + .value((bb, pp) -> ElasticsearchException.generateFailureXContent(bb, pp, exception(), true)) + .field("status", ExceptionsHelper.status(exception())) + .endObject(); + } + return builder; + } + + static class Builder { + private final Writer responseWriter; + private 
final Type type; + private final long timestampMillis; + + private String anomaly; + private SnapshotId snapshotId; + private SnapshotInfo snapshotInfo; + private IndexDescription indexDescription; + private int shardId = -1; + private ShardGeneration shardGeneration; + private String blobName; + private String physicalFileName; + private int partIndex = -1; + private int partCount = -1; + private ByteSizeValue fileLength = ByteSizeValue.MINUS_ONE; + private ByteSizeValue partLength = ByteSizeValue.MINUS_ONE; + private ByteSizeValue blobLength = ByteSizeValue.MINUS_ONE; + private int totalSnapshotCount = -1; + private int restorableSnapshotCount = -1; + private Exception exception; + + Builder(Writer responseWriter, Type type, long timestampMillis) { + this.responseWriter = responseWriter; + this.type = type; + this.timestampMillis = timestampMillis; + } + + Builder anomaly(String anomaly) { + this.anomaly = anomaly; + return this; + } + + Builder snapshotId(SnapshotId snapshotId) { + this.snapshotId = snapshotId; + return this; + } + + Builder snapshotInfo(SnapshotInfo snapshotInfo) { + this.snapshotInfo = snapshotInfo; + return this; + } + + Builder indexDescription(IndexDescription indexDescription) { + this.indexDescription = indexDescription; + return this; + } + + Builder shardDescription(IndexDescription indexDescription, int shardId) { + this.indexDescription = indexDescription; + this.shardId = shardId; + return this; + } + + Builder shardGeneration(ShardGeneration shardGeneration) { + this.shardGeneration = shardGeneration; + return this; + } + + Builder blobName(String blobName, String physicalFileName) { + this.blobName = blobName; + this.physicalFileName = physicalFileName; + return this; + } + + Builder physicalFileName(String physicalFileName) { + this.physicalFileName = physicalFileName; + return this; + } + + Builder part(int partIndex, int partCount) { + this.partIndex = partIndex; + this.partCount = partCount; + return this; + } + + Builder 
fileLength(ByteSizeValue fileLength) { + this.fileLength = Objects.requireNonNull(fileLength); + return this; + } + + Builder partLength(ByteSizeValue partLength) { + this.partLength = Objects.requireNonNull(partLength); + return this; + } + + Builder blobLength(ByteSizeValue blobLength) { + this.blobLength = Objects.requireNonNull(blobLength); + return this; + } + + Builder indexRestorability(IndexId indexId, int totalSnapshotCount, int restorableSnapshotCount) { + this.indexDescription = new IndexDescription(indexId, null, 0); + this.totalSnapshotCount = totalSnapshotCount; + this.restorableSnapshotCount = restorableSnapshotCount; + return this; + } + + Builder exception(Exception exception) { + this.exception = exception; + return this; + } + + void write(ActionListener listener) { + responseWriter.writeResponseChunk( + new RepositoryVerifyIntegrityResponseChunk( + timestampMillis, + type, + anomaly, + snapshotId, + snapshotInfo, + indexDescription, + shardId, + shardGeneration, + blobName, + physicalFileName, + partIndex, + partCount, + fileLength, + partLength, + blobLength, + totalSnapshotCount, + restorableSnapshotCount, + exception + ), + ActionListener.assertOnce(listener) + ); + } + } + + interface Writer { + void writeResponseChunk(RepositoryVerifyIntegrityResponseChunk responseChunk, ActionListener listener); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponseStream.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponseStream.java new file mode 100644 index 0000000000000..7ea9bfe6f2b23 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityResponseStream.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.StreamingXContentResponse; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Represents a (possibly-streaming) response to the repository-verify-integrity API. + */ +class RepositoryVerifyIntegrityResponseStream extends AbstractRefCounted { + // ref-counting discipline: + // - one ref added at creation in the REST layer and released there by the listener returned from getCompletionListener() + // - one ref held for every response chunk while it is being added to the fragment queue + // thus when all refs are released the transport-layer coordinating action is complete and no more trailing fragments can be added, + // so we can send the last response fragment. 
+ + private static final Logger logger = LogManager.getLogger(RepositoryVerifyIntegrityResponseStream.class); + + private final RestChannel restChannel; + + private final SubscribableListener finalResultListener = new SubscribableListener<>(); + + // the listener exposed to the transport response handler + private final ActionListener completionListener = ActionListener.assertOnce( + ActionListener.releaseAfter(finalResultListener, this::decRef) + ); + + // set in startResponse() which completes before any calls to writeChunk() or closeInternal() so no need to be volatile + @Nullable // if not yet started + private StreamingXContentResponse streamingXContentResponse; + + private final AtomicLong anomalyCount = new AtomicLong(); + + RepositoryVerifyIntegrityResponseStream(RestChannel restChannel) { + this.restChannel = restChannel; + } + + void startResponse(Releasable releasable) throws IOException { + assert hasReferences(); + assert streamingXContentResponse == null; + streamingXContentResponse = new StreamingXContentResponse(restChannel, restChannel.request(), () -> {}); + streamingXContentResponse.writeFragment( + p0 -> ChunkedToXContentHelper.singleChunk((b, p) -> b.startObject().startArray("log")), + releasable + ); + } + + void writeChunk(RepositoryVerifyIntegrityResponseChunk chunk, Releasable releasable) { + assert hasReferences(); + assert streamingXContentResponse != null; + + if (chunk.type() == RepositoryVerifyIntegrityResponseChunk.Type.ANOMALY) { + anomalyCount.incrementAndGet(); + } + streamingXContentResponse.writeFragment( + p0 -> ChunkedToXContentHelper.singleChunk((b, p) -> b.startObject().value(chunk, p).endObject()), + releasable + ); + } + + @Override + protected void closeInternal() { + try { + assert finalResultListener.isDone(); + finalResultListener.addListener(new ActionListener<>() { + @Override + public void onResponse(RepositoryVerifyIntegrityResponse repositoryVerifyIntegrityResponse) { + // success - finish the response with the 
final results + assert streamingXContentResponse != null; + streamingXContentResponse.writeFragment( + p0 -> ChunkedToXContentHelper.singleChunk( + (b, p) -> b.endArray() + .startObject("results") + .field("status", repositoryVerifyIntegrityResponse.finalTaskStatus()) + .field("final_repository_generation", repositoryVerifyIntegrityResponse.finalRepositoryGeneration()) + .field("total_anomalies", anomalyCount.get()) + .field( + "result", + anomalyCount.get() == 0 + ? repositoryVerifyIntegrityResponse + .originalRepositoryGeneration() == repositoryVerifyIntegrityResponse.finalRepositoryGeneration() + ? "pass" + : "inconclusive due to concurrent writes" + : "fail" + ) + .endObject() + .endObject() + ), + () -> {} + ); + } + + @Override + public void onFailure(Exception e) { + if (streamingXContentResponse != null) { + // failure after starting the response - finish the response with a rendering of the final exception + streamingXContentResponse.writeFragment( + p0 -> ChunkedToXContentHelper.singleChunk( + (b, p) -> b.endArray() + .startObject("exception") + .value((bb, pp) -> ElasticsearchException.generateFailureXContent(bb, pp, e, true)) + .field("status", ExceptionsHelper.status(e)) + .endObject() + .endObject() + ), + () -> {} + ); + } else { + // didn't even get as far as starting to stream the response, must have hit an early exception (e.g. repo not found) + // so we can return this exception directly. 
+ try { + restChannel.sendResponse(new RestResponse(restChannel, e)); + } catch (IOException e2) { + e.addSuppressed(e2); + logger.error("error building error response", e); + assert false : e; // shouldn't actually throw anything here + restChannel.request().getHttpChannel().close(); + } + } + } + }); + } finally { + Releasables.closeExpectNoException(streamingXContentResponse); + } + } + + public ActionListener getCompletionListener() { + return completionListener; + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityTask.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityTask.java new file mode 100644 index 0000000000000..eaae913fe9c6f --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RepositoryVerifyIntegrityTask.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Optional; +import java.util.function.Supplier; + +public class RepositoryVerifyIntegrityTask extends CancellableTask { + + private volatile Supplier statusSupplier; + + public RepositoryVerifyIntegrityTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map headers + ) { + super(id, type, action, description, parentTaskId, headers); + } + + public void setStatusSupplier(Supplier statusSupplier) { + this.statusSupplier = statusSupplier; + } + + @Override + public Status getStatus() { + return Optional.ofNullable(statusSupplier).map(Supplier::get).orElse(null); + } + + public record Status( + String repositoryName, + long repositoryGeneration, + String repositoryUUID, + long snapshotCount, + long snapshotsVerified, + long indexCount, + long indicesVerified, + long indexSnapshotCount, + long indexSnapshotsVerified, + long blobsVerified, + long blobBytesVerified, + long throttledNanos + ) implements org.elasticsearch.tasks.Task.Status { + + public static String NAME = "verify_repository_integrity_status"; + + public Status(StreamInput in) throws IOException { + this( + in.readString(), + in.readVLong(), + in.readString(), + in.readVLong(), + in.readVLong(), + in.readVLong(), + in.readVLong(), + in.readVLong(), + in.readVLong(), + in.readVLong(), + in.readVLong(), + in.readVLong() + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(repositoryName); + out.writeVLong(repositoryGeneration); + 
out.writeString(repositoryUUID); + out.writeVLong(snapshotCount); + out.writeVLong(snapshotsVerified); + out.writeVLong(indexCount); + out.writeVLong(indicesVerified); + out.writeVLong(indexSnapshotCount); + out.writeVLong(indexSnapshotsVerified); + out.writeVLong(blobsVerified); + out.writeVLong(blobBytesVerified); + out.writeVLong(throttledNanos); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("repository"); + builder.field("name", repositoryName); + builder.field("uuid", repositoryUUID); + builder.field("generation", repositoryGeneration); + builder.endObject(); + builder.startObject("snapshots"); + builder.field("verified", snapshotsVerified); + builder.field("total", snapshotCount); + builder.endObject(); + builder.startObject("indices"); + builder.field("verified", indicesVerified); + builder.field("total", indexCount); + builder.endObject(); + builder.startObject("index_snapshots"); + builder.field("verified", indexSnapshotsVerified); + builder.field("total", indexSnapshotCount); + builder.endObject(); + builder.startObject("blobs"); + builder.field("verified", blobsVerified); + if (throttledNanos > 0) { + builder.humanReadableField("verified_size_in_bytes", "verified_size", ByteSizeValue.ofBytes(blobBytesVerified)); + builder.humanReadableField("throttled_time_in_millis", "throttled_time", TimeValue.timeValueNanos(throttledNanos)); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java new file mode 100644 index 0000000000000..16cdb9140411c 
--- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/RestRepositoryVerifyIntegrityAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestCancellableNodeClient; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +@ServerlessScope(Scope.INTERNAL) +public class RestRepositoryVerifyIntegrityAction extends BaseRestHandler { + + public static final NodeFeature REPOSITORY_VERIFY_INTEGRITY_FEATURE = new NodeFeature("snapshot.repository_verify_integrity"); + + @Override + public List routes() { + return List.of(new Route(POST, "/_snapshot/{repository}/_verify_integrity")); + } + + @Override + public String getName() { + return "repository_verify_integrity"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + final var requestParams = new RepositoryVerifyIntegrityParams(request); + return channel -> { + final var responseStream = new RepositoryVerifyIntegrityResponseStream(channel); + new RestCancellableNodeClient(client, request.getHttpChannel()).execute( + TransportRepositoryVerifyIntegrityCoordinationAction.INSTANCE, + new TransportRepositoryVerifyIntegrityCoordinationAction.Request(requestParams, responseStream), + responseStream.getCompletionListener() + ); + }; + } 
+} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityAction.java new file mode 100644 index 0000000000000..aa29f83341317 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityAction.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import 
org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.Executor; +import java.util.function.LongSupplier; + +/** + * Transport action that actually runs the {@link RepositoryIntegrityVerifier} and sends response chunks back to the coordinating node. + */ +class TransportRepositoryVerifyIntegrityAction extends HandledTransportAction< + TransportRepositoryVerifyIntegrityAction.Request, + RepositoryVerifyIntegrityResponse> { + + // NB runs on the master because that's the expected place to read metadata blobs from the repository, but not an actual + // TransportMasterNodeAction since we don't want to retry on a master failover + + static final String ACTION_NAME = TransportRepositoryVerifyIntegrityCoordinationAction.INSTANCE.name() + "[m]"; + private final RepositoriesService repositoriesService; + private final TransportService transportService; + private final Executor executor; + + TransportRepositoryVerifyIntegrityAction( + TransportService transportService, + RepositoriesService repositoriesService, + ActionFilters actionFilters, + Executor executor + ) { + super(ACTION_NAME, transportService, actionFilters, TransportRepositoryVerifyIntegrityAction.Request::new, executor); + this.repositoriesService = repositoriesService; + this.transportService = transportService; + this.executor = executor; + } + + static class Request extends ActionRequest { + private final DiscoveryNode coordinatingNode; + private final long coordinatingTaskId; + private final RepositoryVerifyIntegrityParams requestParams; + + Request(DiscoveryNode coordinatingNode, long coordinatingTaskId, RepositoryVerifyIntegrityParams requestParams) { + this.coordinatingNode = coordinatingNode; + this.coordinatingTaskId = coordinatingTaskId; + this.requestParams = 
Objects.requireNonNull(requestParams); + } + + Request(StreamInput in) throws IOException { + super(in); + coordinatingNode = new DiscoveryNode(in); + coordinatingTaskId = in.readVLong(); + requestParams = new RepositoryVerifyIntegrityParams(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + coordinatingNode.writeTo(out); + out.writeVLong(coordinatingTaskId); + requestParams.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new RepositoryVerifyIntegrityTask(id, type, action, getDescription(), parentTaskId, headers); + } + } + + @Override + protected void doExecute(Task rawTask, Request request, ActionListener listener) { + final var responseWriter = new RepositoryVerifyIntegrityResponseChunk.Writer() { + + // no need to obtain a fresh connection each time - this connection shouldn't close, so if it does we can fail the verification + final Transport.Connection responseConnection = transportService.getConnection(request.coordinatingNode); + + @Override + public void writeResponseChunk(RepositoryVerifyIntegrityResponseChunk responseChunk, ActionListener listener) { + transportService.sendChildRequest( + responseConnection, + TransportRepositoryVerifyIntegrityResponseChunkAction.ACTION_NAME, + new TransportRepositoryVerifyIntegrityResponseChunkAction.Request(request.coordinatingTaskId, responseChunk), + rawTask, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler( + listener.map(ignored -> null), + in -> ActionResponse.Empty.INSTANCE, + executor + ) + ); + } + }; + + final LongSupplier currentTimeMillisSupplier = transportService.getThreadPool()::absoluteTimeInMillis; + final var repository = (BlobStoreRepository) repositoriesService.repository(request.requestParams.repository()); + final var task = 
(RepositoryVerifyIntegrityTask) rawTask; + + SubscribableListener + + .newForked(l -> repository.getRepositoryData(executor, l)) + .andThenApply(repositoryData -> { + final var cancellableThreads = new CancellableThreads(); + task.addListener(() -> cancellableThreads.cancel("task cancelled")); + final var verifier = new RepositoryIntegrityVerifier( + currentTimeMillisSupplier, + repository, + responseWriter, + request.requestParams.withResolvedDefaults(repository.threadPool().info(ThreadPool.Names.SNAPSHOT_META)), + repositoryData, + cancellableThreads + ); + task.setStatusSupplier(verifier::getStatus); + return verifier; + }) + .andThen( + (l, repositoryIntegrityVerifier) -> new RepositoryVerifyIntegrityResponseChunk.Builder( + responseWriter, + RepositoryVerifyIntegrityResponseChunk.Type.START_RESPONSE, + currentTimeMillisSupplier.getAsLong() + ).write(l.map(ignored -> repositoryIntegrityVerifier)) + ) + .andThen((l, repositoryIntegrityVerifier) -> repositoryIntegrityVerifier.start(l)) + .addListener(listener); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityCoordinationAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityCoordinationAction.java new file mode 100644 index 0000000000000..d5a5749997d8d --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityCoordinationAction.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; + +import java.util.Map; +import java.util.concurrent.Executor; + +/** + * Transport action that coordinates the integrity verification, dispatching a request to run the verification on the master and setting up + * the machinery needed to send the response chunks back to the client. + */ +public class TransportRepositoryVerifyIntegrityCoordinationAction extends TransportAction< + TransportRepositoryVerifyIntegrityCoordinationAction.Request, + RepositoryVerifyIntegrityResponse> { + + /* + * Message flow: the coordinating node (the one running this action) forwards the request on to a master node which actually runs the + * verification. The master node in turn sends requests back to this node containing chunks of response, either information about the + * snapshots processed, or about the restorability of the indices in the repository, or details of any verification anomalies found. 
+ * When the process is complete the master responds to the original transport request with the final results: + * + * +---------+ +-------------+ +--------+ + * | Client | | Coordinator | | Master | + * +---------+ +-------------+ +--------+ + * | | | + * |-[REST request]--------------------->| | + * | |---[master node request]----------------->| ----------------------\ + * | | |-| Initialize verifier | + * | | | |---------------------| + * | |<--[START_RESPONSE chunk request]---------| + * |<---[headers & initial JSON body]----| | + * | |---[START_RESPONSE chunk response]------->| ------------------\ + * | | |-| Verify snapshot | + * | | | |-----------------| + * | |<--[SNAPSHOT_INFO chunk request]----------| + * |<---[more JSON body]-----------------| | + * | |---[SNAPSHOT_INFO chunk response]-------->| ------------------\ + * | | |-| Verify snapshot | + * | | | |-----------------| + * | |<--[SNAPSHOT_INFO chunk request]----------| + * |<---[more JSON body]-----------------| | + * | |---[SNAPSHOT_INFO chunk response]-------->| ... + * . . . + * . . . + * | | | -----------------------------\ + * | | |-| Verify index restorability | + * | | | |----------------------------| + * | |<--[INDEX_RESTORABILITY chunk request]----| + * |<---[more JSON body]-----------------| | + * | |---[INDEX_RESTORABILITY chunk response]-->| -----------------------------\ + * | | |-| Verify index restorability | + * | | | |----------------------------| + * | |<--[INDEX_RESTORABILITY chunk request]----| + * |<---[more JSON body]-----------------| | + * | |---[INDEX_RESTORABILITY chunk response]-->| ... + * . . . + * . . . + * | |<--[response to master node request]------| + * |<--[final JSON to complete body]-----| | + * + * This message flow ties the lifecycle of the verification process to that of the transport request sent from coordinator to master, + * which means it integrates well with the tasks framework and handles network issues properly. 
An alternative would be for the + * coordinator to repeatedly request chunks from the master, but that would mean that there's no one task representing the whole + * process, and it'd be a little tricky for the master node to know if the coordinator has failed and the verification should be + * cancelled. + */ + + public static final ActionType INSTANCE = new ActionType<>( + "cluster:admin/repository/verify_integrity" + ); + + private final ActiveRepositoryVerifyIntegrityTasks activeRepositoryVerifyIntegrityTasks = new ActiveRepositoryVerifyIntegrityTasks(); + + private final TransportService transportService; + private final ClusterService clusterService; + private final Executor managementExecutor; + + public static class Request extends ActionRequest { + private final RepositoryVerifyIntegrityParams requestParams; + private final RepositoryVerifyIntegrityResponseStream responseStream; + + public Request(RepositoryVerifyIntegrityParams requestParams, RepositoryVerifyIntegrityResponseStream responseStream) { + this.requestParams = requestParams; + this.responseStream = responseStream; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public RepositoryVerifyIntegrityParams requestParams() { + return requestParams; + } + + public RepositoryVerifyIntegrityResponseStream responseStream() { + return responseStream; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers); + } + } + + @Inject + public TransportRepositoryVerifyIntegrityCoordinationAction( + TransportService transportService, + ClusterService clusterService, + RepositoriesService repositoriesService, + ActionFilters actionFilters + ) { + super( + INSTANCE.name(), + actionFilters, + transportService.getTaskManager(), + transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT) + ); + + 
this.transportService = transportService; + this.clusterService = clusterService; + this.managementExecutor = transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT); + + // register subsidiary actions + new TransportRepositoryVerifyIntegrityAction(transportService, repositoriesService, actionFilters, managementExecutor); + + new TransportRepositoryVerifyIntegrityResponseChunkAction( + transportService, + actionFilters, + managementExecutor, + activeRepositoryVerifyIntegrityTasks + ); + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + ActionListener.run( + ActionListener.releaseAfter( + listener, + activeRepositoryVerifyIntegrityTasks.registerResponseBuilder(task.getId(), request.responseStream()) + ), + l -> { + final var master = clusterService.state().nodes().getMasterNode(); + if (master == null) { + // no waiting around or retries here, we just fail immediately + throw new MasterNotDiscoveredException(); + } + transportService.sendChildRequest( + master, + TransportRepositoryVerifyIntegrityAction.ACTION_NAME, + new TransportRepositoryVerifyIntegrityAction.Request( + transportService.getLocalNode(), + task.getId(), + request.requestParams() + ), + task, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(l, RepositoryVerifyIntegrityResponse::new, managementExecutor) + ); + } + ); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityResponseChunkAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityResponseChunkAction.java new file mode 100644 index 0000000000000..9015866fb3ec2 --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/integrity/TransportRepositoryVerifyIntegrityResponseChunkAction.java @@ 
-0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.repositories.blobstore.testkit.integrity; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.Executor; + +/** + * Transport action that handles a response chunk on the coordinating node, sending it out to the REST client. 
+ */ +class TransportRepositoryVerifyIntegrityResponseChunkAction extends HandledTransportAction< + TransportRepositoryVerifyIntegrityResponseChunkAction.Request, + ActionResponse.Empty> { + + static final String ACTION_NAME = TransportRepositoryVerifyIntegrityCoordinationAction.INSTANCE.name() + "[response_chunk]"; + + private final ActiveRepositoryVerifyIntegrityTasks activeRepositoryVerifyIntegrityTasks; + + TransportRepositoryVerifyIntegrityResponseChunkAction( + TransportService transportService, + ActionFilters actionFilters, + Executor executor, + ActiveRepositoryVerifyIntegrityTasks activeRepositoryVerifyIntegrityTasks + ) { + super(ACTION_NAME, transportService, actionFilters, Request::new, executor); + this.activeRepositoryVerifyIntegrityTasks = activeRepositoryVerifyIntegrityTasks; + } + + static class Request extends ActionRequest { + private final long coordinatingTaskId; + private final RepositoryVerifyIntegrityResponseChunk chunkContents; + + Request(long coordinatingTaskId, RepositoryVerifyIntegrityResponseChunk chunkContents) { + this.coordinatingTaskId = coordinatingTaskId; + this.chunkContents = Objects.requireNonNull(chunkContents); + } + + Request(StreamInput in) throws IOException { + super(in); + coordinatingTaskId = in.readVLong(); + chunkContents = new RepositoryVerifyIntegrityResponseChunk(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(coordinatingTaskId); + chunkContents.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public RepositoryVerifyIntegrityResponseChunk chunkContents() { + return chunkContents; + } + } + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + ActionListener.run(listener, l -> { + final var responseStream = activeRepositoryVerifyIntegrityTasks.acquireResponseStream(request.coordinatingTaskId); + try { + if (request.chunkContents().type() 
== RepositoryVerifyIntegrityResponseChunk.Type.START_RESPONSE) { + responseStream.startResponse(() -> l.onResponse(ActionResponse.Empty.INSTANCE)); + } else { + responseStream.writeChunk(request.chunkContents(), () -> l.onResponse(ActionResponse.Empty.INSTANCE)); + } + } finally { + responseStream.decRef(); + } + }); + } +} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/snapshot-repo-test-kit/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..ae11c3bb39d0b --- /dev/null +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. 
+# + +org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKitFeatures diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java index 296af0c983279..2c92d5387d871 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.spatial.search; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -20,6 +22,8 @@ import java.util.Collection; import java.util.Collections; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT extends GeoBoundingBoxQueryIntegTestCase { @Override diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java index 3cf70b1d477b6..b48f5a8c17e98 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.spatial.search; +import 
org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Circle; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -27,6 +29,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeWithDocValuesIT extends GeoShapeIntegTestCase { @Override diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 5999a3ff1e151..0e04cfe6757bf 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.utils.GeometryValidator; import org.elasticsearch.geometry.utils.WellKnownBinary; @@ -91,16 +92,6 @@ public void testDefaultConfiguration() throws IOException { assertTrue(fieldType.hasDocValues()); } - public void testDefaultDocValueConfigurationOnPre7_8() throws IOException { - IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_7_0); - DocumentMapper defaultMapper = createDocumentMapper(oldVersion, fieldMapping(this::minimalMapping)); - Mapper fieldMapper = defaultMapper.mappers().getMapper(FIELD_NAME); - 
assertThat(fieldMapper, instanceOf(fieldMapperClass())); - - GeoShapeWithDocValuesFieldMapper geoShapeFieldMapper = (GeoShapeWithDocValuesFieldMapper) fieldMapper; - assertFalse(geoShapeFieldMapper.fieldType().hasDocValues()); - } - /** * Test that orientation parameter correctly parses */ @@ -289,6 +280,8 @@ public void testInvalidCurrentVersion() { ); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public void testGeoShapeLegacyMerge() throws Exception { IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); MapperService m = createMapperService(version, fieldMapping(b -> b.field("type", getFieldName()))); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java index 35ccfe8deb5fe..d030a2bbf81ad 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapperTests.java @@ -114,7 +114,7 @@ public void testDefaultConfiguration() throws IOException { public void testDefaultDocValueConfigurationOnPre8_4() throws IOException { // TODO verify which version this test is actually valid for (when PR is actually merged) - IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_8_3_0); + IndexVersion oldVersion = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_3_0); DocumentMapper defaultMapper = createDocumentMapper(oldVersion, fieldMapping(this::minimalMapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper(FIELD_NAME); assertThat(fieldMapper, instanceOf(fieldMapperClass())); diff --git 
a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java index 593656411eb41..405ef5c480687 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/GeoShapeQueryBuilderGeoShapeTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.ShapeType; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.GeoShapeQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; @@ -88,25 +87,12 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { } if (ESTestCase.randomBoolean()) { SearchExecutionContext context = AbstractBuilderTestCase.createSearchExecutionContext(); - if (context.indexVersionCreated().onOrAfter(IndexVersions.V_7_5_0)) { // CONTAINS is only supported from version 7.5 - if (shapeType == ShapeType.LINESTRING || shapeType == ShapeType.MULTILINESTRING) { - builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); - } else { - builder.relation( - ESTestCase.randomFrom( - ShapeRelation.DISJOINT, - ShapeRelation.INTERSECTS, - ShapeRelation.WITHIN, - ShapeRelation.CONTAINS - ) - ); - } + if (shapeType == ShapeType.LINESTRING || shapeType == ShapeType.MULTILINESTRING) { + builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); } else { - if (shapeType == ShapeType.LINESTRING || shapeType == ShapeType.MULTILINESTRING) { - builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, 
ShapeRelation.INTERSECTS)); - } else { - builder.relation(ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN)); - } + builder.relation( + ESTestCase.randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS) + ); } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java index 053931a882e4c..de66d0b822c94 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/LegacyGeoShapeWithDocValuesQueryTests.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.spatial.index.query; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoJson; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.MultiPoint; @@ -39,6 +41,8 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeWithDocValuesQueryTests extends GeoShapeQueryTestCase { @SuppressWarnings("deprecation") diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java index aa5ae72df2b9e..72073a6eff550 100644 --- 
a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverShapeTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.ShapeType; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.SearchExecutionContext; @@ -33,18 +32,10 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws @Override protected ShapeRelation getShapeRelation(ShapeType type) { SearchExecutionContext context = createSearchExecutionContext(); - if (context.indexVersionCreated().onOrAfter(IndexVersions.V_7_5_0)) { // CONTAINS is only supported from version 7.5 - if (type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING) { - return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS); - } else { - return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS); - } + if (type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING) { + return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS); } else { - if (type == ShapeType.LINESTRING || type == ShapeType.MULTILINESTRING) { - return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS); - } else { - return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN); - } + return randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS); } } diff --git a/x-pack/plugin/sql/qa/jdbc/build.gradle b/x-pack/plugin/sql/qa/jdbc/build.gradle index 42bf524dac17e..022306fe9b306 100644 --- a/x-pack/plugin/sql/qa/jdbc/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/build.gradle @@ -72,7 
+72,7 @@ subprojects { // Configure compatibility testing tasks // Compatibility testing for JDBC driver started with version 7.9.0 - BuildParams.bwcVersions.allIndexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion -> + BuildParams.bwcVersions.indexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion -> def baseName = "v${bwcVersion}" def cluster = testClusters.register(baseName) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml index 07ed83e1f8863..95e4aa9b7b4dc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml @@ -1,9 +1,12 @@ --- "Test data stream lifecycle usage stats": - requires: - cluster_features: "gte_v8.11.0" - reason: "the data stream lifecycle stats were updated to the usage api in 8.11" - test_runner_features: "allowed_warnings" + reason: "Global retention telemetry was added in 8.16+" + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: GET + path: /_xpack/usage + capabilities: [ 'global_retention_telemetry' ] - do: xpack.usage: {} @@ -12,9 +15,19 @@ - match: { data_lifecycle.enabled: true } - match: { data_lifecycle.count: 0 } - match: { data_lifecycle.default_rollover_used: true } - - match: { data_lifecycle.retention.minimum_millis: 0 } - - match: { data_lifecycle.retention.maximum_millis: 0 } - - match: { data_lifecycle.retention.average_millis: 0 } + + - match: { data_lifecycle.data_retention.configured_data_streams: 0 } + - is_false: data_lifecycle.data_retention.minimum_millis + - is_false: data_lifecycle.data_retention.maximum_millis + - is_false: data_lifecycle.data_retention.average_millis + + - match: { 
data_lifecycle.effective_retention.retained_data_streams: 0 } + - is_false: data_lifecycle.effective_retention.minimum_millis + - is_false: data_lifecycle.effective_retention.maximum_millis + - is_false: data_lifecycle.effective_retention.average_millis + + - match: { data_lifecycle.global_retention.max.defined: false } + - match: { data_lifecycle.global_retention.default.defined: false } - do: allowed_warnings: @@ -89,9 +102,15 @@ - match: { data_lifecycle.enabled: true } - match: { data_lifecycle.count: 2 } - match: { data_lifecycle.default_rollover_used: true } - - match: { data_lifecycle.retention.minimum_millis: 432000000 } - - match: { data_lifecycle.retention.maximum_millis: 864000000 } - - match: { data_lifecycle.retention.average_millis: 648000000 } + - match: { data_lifecycle.data_retention.configured_data_streams: 2 } + - match: { data_lifecycle.data_retention.minimum_millis: 432000000 } + - match: { data_lifecycle.data_retention.maximum_millis: 864000000 } + - match: { data_lifecycle.data_retention.average_millis: 648000000 } + + - match: { data_lifecycle.effective_retention.retained_data_streams: 2 } + - match: { data_lifecycle.effective_retention.minimum_millis: 432000000 } + - match: { data_lifecycle.data_retention.maximum_millis: 864000000 } + - match: { data_lifecycle.data_retention.average_millis: 648000000 } - do: indices.delete_data_stream: @@ -106,6 +125,13 @@ - match: { data_lifecycle.enabled: true } - match: { data_lifecycle.count: 1 } - match: { data_lifecycle.default_rollover_used: true } - - match: { data_lifecycle.retention.minimum_millis: 432000000 } - - match: { data_lifecycle.retention.maximum_millis: 432000000 } - - match: { data_lifecycle.retention.average_millis: 432000000 } + + - match: { data_lifecycle.data_retention.configured_data_streams: 1 } + - match: { data_lifecycle.data_retention.minimum_millis: 432000000 } + - match: { data_lifecycle.data_retention.maximum_millis: 432000000 } + - match: { 
data_lifecycle.data_retention.average_millis: 432000000 } + + - match: { data_lifecycle.effective_retention.retained_data_streams: 1 } + - match: { data_lifecycle.effective_retention.minimum_millis: 432000000 } + - match: { data_lifecycle.effective_retention.maximum_millis: 432000000 } + - match: { data_lifecycle.effective_retention.average_millis: 432000000 } diff --git a/x-pack/plugin/src/yamlRestTestV7Compat/resources/rest-api-spec.test.geo_shape/10_compat_geo_shape_with_types.yml b/x-pack/plugin/src/yamlRestTestV7Compat/resources/rest-api-spec.test.geo_shape/10_compat_geo_shape_with_types.yml deleted file mode 100644 index 2cf26f336ba47..0000000000000 --- a/x-pack/plugin/src/yamlRestTestV7Compat/resources/rest-api-spec.test.geo_shape/10_compat_geo_shape_with_types.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings" - - "allowed_warnings_regex" - ---- -"Test geo_shape with type": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - indices.create: - index: shapes - include_type_name: true - body: - mappings: - _doc: - properties: - location: - type: geo_shape - - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - index: - index: shapes - type: _doc - id: deu - body: - location: - type : "envelope" - coordinates: [[13.0, 53.0], [14.0, 52.0]] - - - do: - indices.refresh: {} - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "[types removal] Types are deprecated in [geo_shape] queries. The type should no longer be specified in the [indexed_shape] section." 
- search: - rest_total_hits_as_int: true - index: shapes - size: 0 - body: - query: - bool: - filter: - geo_shape: - location: - indexed_shape: - index: "shapes" - type: "_doc" - id: "deu" - path: "location" - - - match: {hits.total: 1 } diff --git a/x-pack/plugin/src/yamlRestTestV7Compat/resources/rest-api-spec/test/freeze.gone/10_basic_compat.yml b/x-pack/plugin/src/yamlRestTestV7Compat/resources/rest-api-spec/test/freeze.gone/10_basic_compat.yml deleted file mode 100644 index a2d05446bbe1a..0000000000000 --- a/x-pack/plugin/src/yamlRestTestV7Compat/resources/rest-api-spec/test/freeze.gone/10_basic_compat.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings_regex" - - do: - index: - index: some-test-index-1 - id: "1" - body: { foo: bar } - ---- -"Freezing a non-existent index throws 404": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - indices.freeze: - index: some-test-index-404 - catch: missing - ---- -"Freezing an index throws 410": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - indices.freeze: - index: some-test-index-1 - catch: /It is no longer possible to freeze indices, but existing frozen indices can still be unfrozen/ - ---- -"Without compat headers throws 400": - - do: - indices.freeze: - index: some-test-index-1 - catch: /no handler found for uri/ diff --git a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle index 13470e3c2e4ec..b429e123bb631 100644 --- a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle @@ -12,7 +12,7 @@ dependencies { testImplementation project(':x-pack:qa') 
} -Version ccsCompatVersion = new Version(VersionProperties.getElasticsearchVersion().getMajor(), VersionProperties.getElasticsearchVersion().getMinor() - 1, 0) +Version ccsCompatVersion = BuildParams.bwcVersions.minimumWireCompatibleVersion restResources { restApi { diff --git a/x-pack/plugin/watcher/qa/rest/build.gradle b/x-pack/plugin/watcher/qa/rest/build.gradle index 3f61bdcb3c2ed..a911c022212b2 100644 --- a/x-pack/plugin/watcher/qa/rest/build.gradle +++ b/x-pack/plugin/watcher/qa/rest/build.gradle @@ -35,25 +35,3 @@ if (BuildParams.inFipsJvm){ tasks.named("yamlRestTest").configure{enabled = false } } - -tasks.named("yamlRestTestV7CompatTransform").configure{ task -> - task.skipTest("mustache/30_search_input/Test search input mustache integration (using request body and rest_total_hits_as_int)", "remove JodaCompatibleDateTime -- ZonedDateTime doesn't output millis/nanos if they're 0 (#78417)") - task.skipTest("mustache/30_search_input/Test search input mustache integration (using request body)", "remove JodaCompatibleDateTime -- ZonedDateTime doesn't output millis/nanos if they're 0 (#78417)") - task.skipTest("mustache/40_search_transform/Test search transform mustache integration (using request body)", "remove JodaCompatibleDateTime -- ZonedDateTime doesn't output millis/nanos if they're 0 (#78417)") - task.skipTest("painless/40_exception/Test painless exceptions are returned when logging a broken response", "Exceptions are no longer thrown from Mustache, but from the transform action itself") - task.replaceKeyInDo("watcher.ack_watch", "xpack-watcher.ack_watch") - task.replaceKeyInDo("watcher.activate_watch", "xpack-watcher.activate_watch") - task.replaceKeyInDo("watcher.deactivate_watch", "xpack-watcher.deactivate_watch") - task.replaceKeyInDo("watcher.delete_watch", "xpack-watcher.delete_watch") - task.replaceKeyInDo("watcher.execute_watch", "xpack-watcher.execute_watch") - task.replaceKeyInDo("watcher.get_watch", "xpack-watcher.get_watch") - 
task.replaceKeyInDo("watcher.put_watch", "xpack-watcher.put_watch") - task.replaceKeyInDo("watcher.start", "xpack-watcher.start") - task.replaceKeyInDo("watcher.stats", "xpack-watcher.stats") - task.replaceKeyInDo("watcher.stop", "xpack-watcher.stop") - - task.addAllowedWarningRegex(".*_xpack/watcher/.* is deprecated.*") - task.addAllowedWarningRegex("\\[types removal\\].*") - task.replaceValueTextByKeyValue("path", "/my_index/my_type/{{ctx.watch_id}}", "/my_index/_doc/{{ctx.watch_id}}", - "Test webhook action with mustache integration") -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.ack_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.ack_watch.json deleted file mode 100644 index 66ee9a7d3b83f..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.ack_watch.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "xpack-watcher.ack_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html", - "description":"Acknowledges a watch, manually throttling the execution of the watch's actions." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{watch_id}/_ack", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "watch_id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/watcher/watch/{watch_id}/_ack/{action_id}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "watch_id":{ - "type":"string", - "description":"Watch ID" - }, - "action_id":{ - "type":"list", - "description":"A comma-separated list of the action ids to be acked" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.activate_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.activate_watch.json deleted file mode 100644 index 2790b3ac69040..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.activate_watch.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "xpack-watcher.activate_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html", - "description":"Activates a currently inactive watch." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{watch_id}/_activate", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "watch_id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.deactivate_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.deactivate_watch.json deleted file mode 100644 index 7633c5ab9ffcc..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.deactivate_watch.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "xpack-watcher.deactivate_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html", - "description":"Deactivates a currently active watch." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{watch_id}/_deactivate", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "watch_id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.delete_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.delete_watch.json deleted file mode 100644 index b42003207eda4..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.delete_watch.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-watcher.delete_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html", - "description":"Removes a watch from Watcher." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{id}", - "methods":[ - "DELETE" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.execute_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.execute_watch.json deleted file mode 100644 index 4dba5bd1817a5..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.execute_watch.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "xpack-watcher.execute_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html", - "description":"Forces the execution of a stored watch." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{id}/_execute", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/watcher/watch/_execute", - "methods":[ - "PUT", - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "debug":{ - "type":"boolean", - "description":"indicates whether the watch should execute in debug mode", - "required":false - } - }, - "body":{ - "description":"Execution control", - "required":false - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.get_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.get_watch.json deleted file mode 100644 index 0fa158751fba4..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.get_watch.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "xpack-watcher.get_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html", - "description":"Retrieves a watch by its ID." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{id}", - "methods":[ - "GET" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.put_watch.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.put_watch.json deleted file mode 100644 index b6d17b0d92739..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.put_watch.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "xpack-watcher.put_watch":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html", - "description":"Creates a new watch, or updates an existing one." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/watch/{id}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"Watch ID" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "active":{ - "type":"boolean", - "description":"Specify whether the watch is in/active by default" - }, - "version":{ - "type":"number", - "description":"Explicit version number for concurrency control" - }, - "if_seq_no":{ - "type":"number", - "description":"only update the watch if the last operation that has changed the watch has the specified sequence number" - }, - "if_primary_term":{ - "type":"number", - "description":"only update the watch if the last operation that has changed the watch has the specified primary term" - } - }, - "body":{ - "description":"The watch", - "required":false - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.start.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.start.json deleted file mode 100644 index 051344d02e878..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.start.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "xpack-watcher.start":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html", - "description":"Starts Watcher if it is not already running." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/_start", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.stats.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.stats.json deleted file mode 100644 index f9dfd25f1e972..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.stats.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "xpack-watcher.stats":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html", - "description":"Retrieves the current Watcher metrics." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/stats", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/watcher/stats/{metric}", - "methods":[ - "GET" - ], - "parts":{ - "metric":{ - "type":"list", - "options":[ - "_all", - "queued_watches", - "current_watches", - "pending_watches" - ], - "description":"Controls what additional stat metrics should be include in the response" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "metric":{ - "type":"list", - "options":[ - "_all", - "queued_watches", - "current_watches", - "pending_watches" - ], - "description":"Controls what additional stat metrics should be include in the response" - }, - "emit_stacktraces":{ - "type":"boolean", - "description":"Emits stack traces of currently running watches", - "required":false - } - } - } -} diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.stop.json b/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.stop.json deleted file mode 100644 index c31bb162f6015..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-watcher.stop.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "xpack-watcher.stop":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html", - "description":"Stops Watcher if it is running." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/watcher/_stop", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index 578fc90bd3e5b..7abce10a82f3c 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -50,7 +50,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -106,7 +105,6 @@ static SearchExecutionContext createMockSearchExecutionContext(boolean allowExpe static final int MAX_FIELD_LENGTH = 30; static WildcardFieldMapper wildcardFieldType; - static WildcardFieldMapper wildcardFieldType79; static KeywordFieldMapper keywordFieldType; private DirectoryReader rewriteReader; private BaseDirectoryWrapper rewriteDir; @@ -128,9 +126,6 @@ public void setUp() throws Exception { builder.ignoreAbove(MAX_FIELD_LENGTH); wildcardFieldType = builder.build(MapperBuilderContext.root(false, false)); - Builder builder79 = new WildcardFieldMapper.Builder(WILDCARD_FIELD_NAME, IndexVersions.V_7_9_0); - wildcardFieldType79 = builder79.build(MapperBuilderContext.root(false, false)); - 
org.elasticsearch.index.mapper.KeywordFieldMapper.Builder kwBuilder = new KeywordFieldMapper.Builder( KEYWORD_FIELD_NAME, IndexVersion.current() @@ -211,37 +206,6 @@ public void testIgnoreAbove() throws IOException { assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } - public void testBWCIndexVersion() throws IOException { - // Create old format index using wildcard ngram analyzer used in 7.9 launch - Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(WildcardFieldMapper.WILDCARD_ANALYZER_7_9); - iwc.setMergePolicy(newTieredMergePolicy(random())); - RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); - - Document doc = new Document(); - LuceneDocument parseDoc = new LuceneDocument(); - addFields(parseDoc, doc, "a b"); - indexDoc(parseDoc, doc, iw); - - iw.forceMerge(1); - DirectoryReader reader = iw.getReader(); - IndexSearcher searcher = newSearcher(reader); - iw.close(); - - // Unnatural circumstance - testing we fail if we were to use the new analyzer on old index - Query oldWildcardFieldQuery = wildcardFieldType.fieldType().wildcardQuery("a b", null, null); - TopDocs oldWildcardFieldTopDocs = searcher.search(oldWildcardFieldQuery, 10, Sort.INDEXORDER); - assertThat(oldWildcardFieldTopDocs.totalHits.value, equalTo(0L)); - - // Natural circumstance test we revert to the old analyzer for old indices - Query wildcardFieldQuery = wildcardFieldType79.fieldType().wildcardQuery("a b", null, null); - TopDocs wildcardFieldTopDocs = searcher.search(wildcardFieldQuery, 10, Sort.INDEXORDER); - assertThat(wildcardFieldTopDocs.totalHits.value, equalTo(1L)); - - reader.close(); - dir.close(); - } - // Test long query strings don't cause exceptions public void testTooBigQueryField() throws IOException { Directory dir = newDirectory(); diff --git a/x-pack/qa/repository-old-versions/build.gradle b/x-pack/qa/repository-old-versions/build.gradle index c8659aa0753f4..1abf6662a1b8b 100644 --- 
a/x-pack/qa/repository-old-versions/build.gradle +++ b/x-pack/qa/repository-old-versions/build.gradle @@ -66,7 +66,7 @@ if (OS.current() == OS.WINDOWS) { }); int currentMajorVersion = org.elasticsearch.gradle.VersionProperties.elasticsearchVersion.major - assert (currentMajorVersion - 2) == 6 : "add archive BWC tests for major version " + (currentMajorVersion - 2) + assert (currentMajorVersion - 2) == 7 : "add archive BWC tests for major version " + (currentMajorVersion - 2) for (String versionString : ['5.0.0', '5.6.16', '6.0.0', '6.8.20']) { Version version = Version.fromString(versionString) String packageName = 'org.elasticsearch.distribution.zip' diff --git a/x-pack/qa/xpack-prefix-rest-compat/build.gradle b/x-pack/qa/xpack-prefix-rest-compat/build.gradle deleted file mode 100644 index 8b91aae21ff73..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/build.gradle +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - - -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties - -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' - -/** - * This project exists to test the _xpack prefix for REST compatibility. The _xpack prefix was removed from the specification, but still supported - * in 7x. This project re-introduces the _xpack prefix in the specification but only for compatibility testing purposes. 
- */ - -configurations { - compatXpackTests -} - -int compatVersion = VersionProperties.getElasticsearchVersion().getMajor() - 1; - -dependencies { - "yamlRestTestV${compatVersion}CompatImplementation" project(':test:framework') - "yamlRestTestV${compatVersion}CompatImplementation"(testArtifact(project(':x-pack:plugin'))) - compatXpackTests project(path: ':x-pack:plugin', configuration: 'restCompatTests') -} - -// copy the already transformed compatible rest tests from the x-pack compatible tests -tasks.named("copyRestCompatTestTask").configure { task -> - task.dependsOn(configurations.compatXpackTests); - task.setXpackConfig(configurations.compatXpackTests); - task.getIncludeXpack().set(List.of("license", "migration", "ml", "rollup", "sql", "ssl")); - def fileOperations = task.getFileOperations() - task.getOutputResourceDir().set(project.getLayout().getBuildDirectory().dir("restResources/v${compatVersion}/yamlTests/original")) - task.setXpackConfigToFileTree( - config -> fileOperations.fileTree( - config.getSingleFile() - ) - ) - } - -// location for keys and certificates -File extraResourceDir = file("$buildDir/extra_resource") -File nodeKey = file("$extraResourceDir/testnode.pem") -File nodeCert = file("$extraResourceDir/testnode.crt") -// location for service tokens -File serviceTokens = file("$extraResourceDir/service_tokens") - -// Add key and certs to test classpath: it expects them there -// User cert and key PEM files instead of a JKS Keystore for the cluster's trust material so that -// it can run in a FIPS 140 JVM -// TODO: Remove all existing uses of cross project file references when the new approach for referencing static files is available -// https://github.com/elastic/elasticsearch/pull/32201 -def copyExtraResources = tasks.register("copyExtraResources", Copy) { - from(project(':x-pack:plugin:core').file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/')) { - include 'testnode.crt', 'testnode.pem' - } - 
from(project(':x-pack:plugin:security:qa:service-account').file('src/javaRestTest/resources/')) { - include 'service_tokens' - } - into extraResourceDir -} -// Add keystores to test classpath: it expects it there -sourceSets."yamlRestTestV${compatVersion}Compat".resources.compiledBy(copyExtraResources, t -> { - def dirProp = project.objects.directoryProperty() - dirProp.set(t.destinationDir) - return dirProp; -}) - -tasks.named("processYamlRestTestV${compatVersion}CompatResources").configure { - dependsOn("copyExtraResources") -} - -testClusters.configureEach { - testDistribution = 'DEFAULT' // this is important since we use the reindex module in ML - setting 'xpack.ml.enabled', 'true' - setting 'xpack.security.enabled', 'true' - setting 'xpack.watcher.enabled', 'false' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.authc.api_key.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.security.transport.ssl.key', nodeKey.name - setting 'xpack.security.transport.ssl.certificate', nodeCert.name - setting 'xpack.security.transport.ssl.verification_mode', 'certificate' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - // disable ILM history, since it disturbs tests using _all - setting 'indices.lifecycle.history_index_enabled', 'false' - keystore 'bootstrap.password', 'x-pack-test-password' - keystore 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - setting 'xpack.searchable.snapshot.shared_cache.size', '16MB' - setting 'xpack.searchable.snapshot.shared_cache.region_size', '256KB' - - user username: "x_pack_rest_user", password: "x-pack-test-password" - extraConfigFile nodeKey.name, nodeKey - extraConfigFile nodeCert.name, nodeCert - extraConfigFile serviceTokens.name, serviceTokens - - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") -} - -// transform (again) the (already) transformed x-pack 
compatibility tests to test the xpack prefixes -tasks.named("yamlRestTestV7CompatTransform").configure{ task -> - - task.replaceKeyInDo("license.delete", "xpack-license.delete") - task.replaceKeyInDo("license.get", "xpack-license.get") - task.replaceKeyInDo("license.get_basic_status", "xpack-license.get_basic_status") - task.replaceKeyInDo("license.get_trial_status", "xpack-license.get_trial_status") - task.replaceKeyInDo("license.post", "xpack-license.post") - task.replaceKeyInDo("license.post_start_basic", "xpack-license.post_start_basic") - task.replaceKeyInDo("license.post_start_trial", "xpack-license.post_start_trial") - task.addAllowedWarningRegex(".*_xpack/license.* is deprecated.*") - - task.replaceKeyInDo("migration.deprecations", "xpack-migration.deprecations") - task.addAllowedWarningRegex(".*_xpack/migration.* is deprecated.*") - - task.replaceKeyInDo("ml.close_job", "xpack-ml.close_job") - task.replaceKeyInDo("ml.delete_calendar", "xpack-ml.delete_calendar") - task.replaceKeyInDo("ml.delete_calendar_event", "xpack-ml.delete_calendar_event") - task.replaceKeyInDo("ml.delete_calendar_job", "xpack-ml.delete_calendar_job") - task.replaceKeyInDo("ml.delete_datafeed", "xpack-ml.delete_datafeed") - task.replaceKeyInDo("ml.delete_expired_data", "xpack-ml.delete_expired_data") - task.replaceKeyInDo("ml.delete_filter", "xpack-ml.delete_filter") - task.replaceKeyInDo("ml.delete_forecast", "xpack-ml.delete_forecast") - task.replaceKeyInDo("ml.delete_job", "xpack-ml.delete_job") - task.replaceKeyInDo("ml.delete_model_snapshot", "xpack-ml.delete_model_snapshot") - task.replaceKeyInDo("ml.flush_job", "xpack-ml.flush_job") - task.replaceKeyInDo("ml.forecast", "xpack-ml.forecast") - task.replaceKeyInDo("ml.get_buckets", "xpack-ml.get_buckets") - task.replaceKeyInDo("ml.get_calendar_events", "xpack-ml.get_calendar_events") - task.replaceKeyInDo("ml.get_calendars", "xpack-ml.get_calendars") - task.replaceKeyInDo("ml.get_categories", "xpack-ml.get_categories") - 
task.replaceKeyInDo("ml.get_datafeed_stats", "xpack-ml.get_datafeed_stats") - task.replaceKeyInDo("ml.get_datafeeds", "xpack-ml.get_datafeeds") - task.replaceKeyInDo("ml.get_filters", "xpack-ml.get_filters") - task.replaceKeyInDo("ml.get_influencers", "xpack-ml.get_influencers") - task.replaceKeyInDo("ml.get_job_stats", "xpack-ml.get_job_stats") - task.replaceKeyInDo("ml.get_jobs", "xpack-ml.get_jobs") - task.replaceKeyInDo("ml.get_model_snapshots", "xpack-ml.get_model_snapshots") - task.replaceKeyInDo("ml.get_overall_buckets", "xpack-ml.get_overall_buckets") - task.replaceKeyInDo("ml.get_records", "xpack-ml.get_records") - task.replaceKeyInDo("ml.info", "xpack-ml.info") - task.replaceKeyInDo("ml.open_job", "xpack-ml.open_job") - task.replaceKeyInDo("ml.post_calendar_events", "xpack-ml.post_calendar_events") - task.replaceKeyInDo("ml.post_data", "xpack-ml.post_data") - task.replaceKeyInDo("ml.preview_datafeed", "xpack-ml.preview_datafeed") - task.replaceKeyInDo("ml.put_calendar", "xpack-ml.put_calendar") - task.replaceKeyInDo("ml.put_calendar_job", "xpack-ml.put_calendar_job") - task.replaceKeyInDo("ml.put_datafeed", "xpack-ml.put_datafeed") - task.replaceKeyInDo("ml.put_filter", "xpack-ml.put_filter") - task.replaceKeyInDo("ml.put_job", "xpack-ml.put_job") - task.replaceKeyInDo("ml.revert_model_snapshot", "xpack-ml.revert_model_snapshot") - task.replaceKeyInDo("ml.set_upgrade_mode", "xpack-ml.set_upgrade_mode") - task.replaceKeyInDo("ml.start_datafeed", "xpack-ml.start_datafeed") - task.replaceKeyInDo("ml.stop_datafeed", "xpack-ml.stop_datafeed") - task.replaceKeyInDo("ml.update_datafeed", "xpack-ml.update_datafeed") - task.replaceKeyInDo("ml.update_filter", "xpack-ml.update_filter") - task.replaceKeyInDo("ml.update_job", "xpack-ml.update_job") - task.replaceKeyInDo("ml.update_model_snapshot", "xpack-ml.update_model_snapshot") - task.replaceKeyInDo("ml.validate", "xpack-ml.validate") - task.replaceKeyInDo("ml.validate_detector", "xpack-ml.validate_detector") - 
task.addAllowedWarningRegex(".*_xpack/ml.* is deprecated.*") - task.addAllowedWarningRegex("bucket_span .* is not an integral .* of the number of sconds in 1d.* This is now deprecated.*") - - task.replaceKeyInDo("rollup.delete_job", "xpack-rollup.delete_job") - task.replaceKeyInDo("rollup.get_jobs", "xpack-rollup.get_jobs") - task.replaceKeyInDo("rollup.get_rollup_caps", "xpack-rollup.get_rollup_caps") - task.replaceKeyInDo("rollup.get_rollup_index_caps", "xpack-rollup.get_rollup_index_caps") - task.replaceKeyInDo("rollup.put_job", "xpack-rollup.put_job") - task.replaceKeyInDo("rollup.start_job", "xpack-rollup.start_job") - task.replaceKeyInDo("rollup.stop_job", "xpack-rollup.stop_job") - task.addAllowedWarningRegex(".*_xpack/rollup.* is deprecated.*") - - task.replaceKeyInDo("sql.clear_cursor", "xpack-sql.clear_cursor") - task.replaceKeyInDo("sql.query", "xpack-sql.query") - task.replaceKeyInDo("sql.translate", "xpack-sql.translate") - task.addAllowedWarningRegex(".*_xpack/sql.* is deprecated.*") - - task.replaceKeyInDo("ssl.certificates", "xpack-ssl.certificates", "Test get SSL certificates") - task.addAllowedWarningRegexForTest(".*_xpack/ssl.* is deprecated.*", "Test get SSL certificates") -} - -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'ml/evaluate_data_frame/Test classification auc_roc', - 'ml/evaluate_data_frame/Test classification auc_roc with default top_classes_field', - 'ml/evaluate_data_frame/Test outlier_detection auc_roc', - 'ml/evaluate_data_frame/Test outlier_detection auc_roc include curve', - 'ml/evaluate_data_frame/Test outlier_detection auc_roc given actual_field is int', - ].join(',') -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java deleted file mode 100644 index 3e060d2875e8a..0000000000000 
--- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.test.rest; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; - -public class XPackRestIT extends AbstractXPackRestTest { - - public XPackRestIT(ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return createParameters(); - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.delete.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.delete.json deleted file mode 100644 index 3a20798a81482..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.delete.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "xpack-license.delete":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html", - "description":"Deletes licensing information for the cluster" - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license", - "methods":[ - "DELETE" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git 
a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get.json deleted file mode 100644 index e5c89084c3759..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-license.get":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html", - "description":"Retrieves licensing information for the cluster" - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, - "accept_enterprise":{ - "type":"boolean", - "description":"Supported for backwards compatibility with 7.x. 
If this param is used it must be set to true", - "deprecated":true - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get_basic_status.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get_basic_status.json deleted file mode 100644 index f0808e45d2da8..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get_basic_status.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "xpack-license.get_basic_status":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html", - "description":"Retrieves information about the status of the basic license." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license/basic_status", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get_trial_status.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get_trial_status.json deleted file mode 100644 index 8ccde8365830f..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.get_trial_status.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "xpack-license.get_trial_status":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html", - "description":"Retrieves information about the status of the trial license." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license/trial_status", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post.json deleted file mode 100644 index 8db5533bbad9e..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "xpack-license.post":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html", - "description":"Updates the license for the cluster." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license", - "methods":[ - "PUT", - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "acknowledge":{ - "type":"boolean", - "description":"whether the user has acknowledged acknowledge messages (default: false)" - }, - "master_timeout": { - "type": "time", - "description": "Timeout for processing on master node" - }, - "timeout": { - "type": "time", - "description": "Timeout for acknowledgement of update from all nodes in cluster" - } - }, - "body":{ - "description":"licenses to be installed" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post_start_basic.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post_start_basic.json deleted file mode 100644 index 4589ed26386ce..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post_start_basic.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-license.post_start_basic":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html", - "description":"Starts an indefinite basic license." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license/start_basic", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "acknowledge":{ - "type":"boolean", - "description":"whether the user has acknowledged acknowledge messages (default: false)" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post_start_trial.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post_start_trial.json deleted file mode 100644 index 225034d397ef7..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-license.post_start_trial.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "xpack-license.post_start_trial":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html", - "description":"starts a limited time trial license." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/license/start_trial", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "type":{ - "type":"string", - "description":"The type of trial license to generate (default: \"trial\")" - }, - "acknowledge":{ - "type":"boolean", - "description":"whether the user has acknowledged acknowledge messages (default: false)" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-migration.deprecations.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-migration.deprecations.json deleted file mode 100644 index 05885f8458770..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-migration.deprecations.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "xpack-migration.deprecations":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html", - "description":"Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/migration/deprecations", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/{index}/_xpack/migration/deprecations", - "methods":[ - "GET" - ], - "parts":{ - "index":{ - "type":"string", - "description":"Index pattern" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.close_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.close_job.json deleted file mode 100644 index afbf2591e10f9..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.close_job.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "xpack-ml.close_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html", - "description":"Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_close", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The name of the job to close" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_match":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no jobs. 
(This includes `_all` string or when no jobs have been specified)" - }, - "allow_no_jobs":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)", - "deprecated":true - }, - "force":{ - "type":"boolean", - "required":false, - "description":"True if the job should be forcefully closed" - }, - "timeout":{ - "type":"time", - "description":"Controls the time to wait until a job has closed. Default to 30 minutes" - } - }, - "body":{ - "description":"The URL params optionally sent in the body", - "required":false - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar.json deleted file mode 100644 index 7965921a5ea21..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-ml.delete_calendar":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html", - "description":"Deletes a calendar." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to delete" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar_event.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar_event.json deleted file mode 100644 index 7584b2512a8b3..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar_event.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "xpack-ml.delete_calendar_event":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html", - "description":"Deletes scheduled events from a calendar." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}/events/{event_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to modify" - }, - "event_id":{ - "type":"string", - "description":"The ID of the event to remove from the calendar" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar_job.json deleted file mode 100644 index bdada60699df0..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_calendar_job.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "xpack-ml.delete_calendar_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html", - "description":"Deletes anomaly detection jobs from a calendar." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}/jobs/{job_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to modify" - }, - "job_id":{ - "type":"string", - "description":"The ID of the job to remove from the calendar" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_datafeed.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_datafeed.json deleted file mode 100644 index 6743b3eadff91..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_datafeed.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "xpack-ml.delete_datafeed":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html", - "description":"Deletes an existing datafeed." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeed to delete" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "force":{ - "type":"boolean", - "required":false, - "description":"True if the datafeed should be forcefully deleted" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_expired_data.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_expired_data.json deleted file mode 100644 index 0520055d968d1..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_expired_data.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "xpack-ml.delete_expired_data":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html", - "description":"Deletes expired and unused machine learning data." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/_delete_expired_data/{job_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job(s) to perform expired data hygiene for" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/_delete_expired_data", - "methods":[ - "DELETE" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "requests_per_second":{ - "type":"number", - "required":false, - "description":"The desired requests per second for the deletion processes." - }, - "timeout":{ - "type":"time", - "required":false, - "description":"How long can the underlying delete processes run until they are canceled" - } - }, - "body":{ - "description":"deleting expired data parameters" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_filter.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_filter.json deleted file mode 100644 index c9c4f76e15f53..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_filter.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-ml.delete_filter":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html", - "description":"Deletes a filter." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/filters/{filter_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "filter_id":{ - "type":"string", - "description":"The ID of the filter to delete" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_forecast.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_forecast.json deleted file mode 100644 index 4c647046c3704..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_forecast.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "xpack-ml.delete_forecast":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html", - "description":"Deletes forecasts from a machine learning job." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_forecast", - "methods":[ - "DELETE" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job from which to delete forecasts" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_forecast/{forecast_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job from which to delete forecasts" - }, - "forecast_id":{ - "type":"string", - "description":"The ID of the forecast to delete, can be comma delimited list. 
Leaving blank implies `_all`" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_forecasts":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if `_all` matches no forecasts" - }, - "timeout":{ - "type":"time", - "required":false, - "description":"Controls the time to wait until the forecast(s) are deleted. Default to 30 seconds" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_job.json deleted file mode 100644 index 9043baacb781e..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_job.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "xpack-ml.delete_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html", - "description":"Deletes an existing anomaly detection job." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to delete" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "force":{ - "type":"boolean", - "description":"True if the job should be forcefully deleted", - "default":false - }, - "wait_for_completion":{ - "type":"boolean", - "description":"Should this request wait until the operation has completed before returning", - "default":true - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_model_snapshot.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_model_snapshot.json deleted file mode 100644 index e3f1dd2036602..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.delete_model_snapshot.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "xpack-ml.delete_model_snapshot":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html", - "description":"Deletes an existing model snapshot." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}", - "methods":[ - "DELETE" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to fetch" - }, - "snapshot_id":{ - "type":"string", - "description":"The ID of the snapshot to delete" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.flush_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.flush_job.json deleted file mode 100644 index 801258194d28b..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.flush_job.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "xpack-ml.flush_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html", - "description":"Forces any buffered data to be processed by the job." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_flush", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The name of the job to flush" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "calc_interim":{ - "type":"boolean", - "description":"Calculates interim results for the most recent bucket or all buckets within the latency period" - }, - "start":{ - "type":"string", - "description":"When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results" - }, - "end":{ - "type":"string", - "description":"When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results" - }, - "advance_time":{ - "type":"string", - "description":"Advances time to the given value generating results and updating the model for the advanced interval" - }, - "skip_time":{ - "type":"string", - "description":"Skips time to the given value without generating results or updating the model for the skipped interval" - } - }, - "body":{ - "description":"Flush parameters" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.forecast.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.forecast.json deleted file mode 100644 index f86d1d6f6d943..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.forecast.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "xpack-ml.forecast":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html", - 
"description":"Predicts the future behavior of a time series by using its historical behavior." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_forecast", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to forecast for" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "duration":{ - "type":"time", - "required":false, - "description":"The duration of the forecast" - }, - "expires_in":{ - "type":"time", - "required":false, - "description":"The time interval after which the forecast expires. Expired forecasts will be deleted at the first opportunity." - }, - "max_model_memory":{ - "type":"string", - "required":false, - "description":"The max memory able to be used by the forecast. Default is 20mb." - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_buckets.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_buckets.json deleted file mode 100644 index 0ccc693b5b682..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_buckets.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "xpack-ml.get_buckets":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html", - "description":"Retrieves anomaly detection job results for one or more buckets." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/buckets/{timestamp}", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"ID of the job to get bucket results from" - }, - "timestamp":{ - "type":"string", - "description":"The timestamp of the desired single bucket result" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/buckets", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"ID of the job to get bucket results from" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "expand":{ - "type":"boolean", - "description":"Include anomaly records" - }, - "exclude_interim":{ - "type":"boolean", - "description":"Exclude interim results" - }, - "from":{ - "type":"int", - "description":"skips a number of buckets" - }, - "size":{ - "type":"int", - "description":"specifies a max number of buckets to get" - }, - "start":{ - "type":"string", - "description":"Start time filter for buckets" - }, - "end":{ - "type":"string", - "description":"End time filter for buckets" - }, - "anomaly_score":{ - "type":"double", - "description":"Filter for the most anomalous buckets" - }, - "sort":{ - "type":"string", - "description":"Sort buckets by a particular field" - }, - "desc":{ - "type":"boolean", - "description":"Set the sort direction" - } - }, - "body":{ - "description":"Bucket selection details if not provided in URI" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_calendar_events.json 
b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_calendar_events.json deleted file mode 100644 index 731c510767488..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_calendar_events.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "xpack-ml.get_calendar_events":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html", - "description":"Retrieves information about the scheduled events in calendars." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}/events", - "methods":[ - "GET" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar containing the events" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "job_id":{ - "type":"string", - "description":"Get events for the job. 
When this option is used calendar_id must be '_all'" - }, - "start":{ - "type":"string", - "description":"Get events after this time" - }, - "end":{ - "type":"date", - "description":"Get events before this time" - }, - "from":{ - "type":"int", - "description":"Skips a number of events" - }, - "size":{ - "type":"int", - "description":"Specifies a max number of events to get" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_calendars.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_calendars.json deleted file mode 100644 index c80005451ff0b..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_calendars.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "xpack-ml.get_calendars":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html", - "description":"Retrieves configuration information for calendars." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars", - "methods":[ - "GET", - "POST" - ] - }, - { - "path":"/_xpack/ml/calendars/{calendar_id}", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "from":{ - "type":"int", - "description":"skips a number of calendars" - }, - "size":{ - "type":"int", - "description":"specifies a max number of calendars to get" - } - }, - "body":{ - "description":"The from and size parameters optionally sent in the body" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json deleted file mode 100644 index 4fce55f682248..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_categories.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "xpack-ml.get_categories":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html", - "description":"Retrieves anomaly detection job results for one or more categories." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/categories/{category_id}", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The name of the job" - }, - "category_id":{ - "type":"long", - "description":"The identifier of the category definition of interest" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/categories", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The name of the job" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "from":{ - "type":"int", - "description":"skips a number of categories" - }, - "size":{ - "type":"int", - "description":"specifies a max number of categories to get" - }, - "partition_field_value":{ - "type":"string", - "description":"Specifies the partition to retrieve categories for. This is optional, and should never be used for jobs where per-partition categorization is disabled." 
- } - }, - "body":{ - "description":"Category selection details if not provided in URI" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_datafeed_stats.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_datafeed_stats.json deleted file mode 100644 index 1c63df2a33fef..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_datafeed_stats.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "xpack-ml.get_datafeed_stats":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html", - "description":"Retrieves usage information for datafeeds." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}/_stats", - "methods":[ - "GET" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeeds stats to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/datafeeds/_stats", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_match":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)" - }, - "allow_no_datafeeds":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no datafeeds. 
(This includes `_all` string or when no datafeeds have been specified)", - "deprecated":true - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_datafeeds.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_datafeeds.json deleted file mode 100644 index 14f8a3f65397d..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_datafeeds.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "xpack-ml.get_datafeeds":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html", - "description":"Retrieves configuration information for datafeeds." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}", - "methods":[ - "GET" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeeds to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/datafeeds", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_match":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)" - }, - "allow_no_datafeeds":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no datafeeds. 
(This includes `_all` string or when no datafeeds have been specified)", - "deprecated":true - }, - "exclude_generated": { - "required": false, - "type": "boolean", - "default": false, - "description": "Omits fields that are illegal to set on datafeed PUT" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_filters.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_filters.json deleted file mode 100644 index 886f289a75096..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_filters.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "xpack-ml.get_filters":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html", - "description":"Retrieves filters." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/filters", - "methods":[ - "GET" - ] - }, - { - "path":"/_xpack/ml/filters/{filter_id}", - "methods":[ - "GET" - ], - "parts":{ - "filter_id":{ - "type":"string", - "description":"The ID of the filter to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "from":{ - "type":"int", - "description":"skips a number of filters" - }, - "size":{ - "type":"int", - "description":"specifies a max number of filters to get" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_influencers.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_influencers.json deleted file mode 100644 index d039f14175f09..0000000000000 --- 
a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_influencers.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "xpack-ml.get_influencers":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html", - "description":"Retrieves anomaly detection job results for one or more influencers." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/influencers", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"Identifier for the anomaly detection job" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "exclude_interim":{ - "type":"boolean", - "description":"Exclude interim results" - }, - "from":{ - "type":"int", - "description":"skips a number of influencers" - }, - "size":{ - "type":"int", - "description":"specifies a max number of influencers to get" - }, - "start":{ - "type":"string", - "description":"start timestamp for the requested influencers" - }, - "end":{ - "type":"string", - "description":"end timestamp for the requested influencers" - }, - "influencer_score":{ - "type":"double", - "description":"influencer score threshold for the requested influencers" - }, - "sort":{ - "type":"string", - "description":"sort field for the requested influencers" - }, - "desc":{ - "type":"boolean", - "description":"whether the results should be sorted in decending order" - } - }, - "body":{ - "description":"Influencer selection criteria" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_job_stats.json 
b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_job_stats.json deleted file mode 100644 index ea6154916c155..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_job_stats.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "xpack-ml.get_job_stats":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html", - "description":"Retrieves usage information for anomaly detection jobs." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/_stats", - "methods":[ - "GET" - ] - }, - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_stats", - "methods":[ - "GET" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the jobs stats to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_match":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)" - }, - "allow_no_jobs":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no jobs. 
(This includes `_all` string or when no jobs have been specified)", - "deprecated":true - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_jobs.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_jobs.json deleted file mode 100644 index 70fb374b846fe..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_jobs.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "xpack-ml.get_jobs":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html", - "description":"Retrieves configuration information for anomaly detection jobs." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}", - "methods":[ - "GET" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the jobs to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/anomaly_detectors", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_match":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)" - }, - "allow_no_jobs":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no jobs. 
(This includes `_all` string or when no jobs have been specified)", - "deprecated":true - }, - "exclude_generated": { - "required": false, - "type": "boolean", - "default": false, - "description": "Omits fields that are illegal to set on job PUT" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_model_snapshots.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_model_snapshots.json deleted file mode 100644 index 44120f655960e..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_model_snapshots.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "xpack-ml.get_model_snapshots":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html", - "description":"Retrieves information about model snapshots." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to fetch" - }, - "snapshot_id":{ - "type":"string", - "description":"The ID of the snapshot to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to fetch" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "from":{ - "type":"int", - "description":"Skips a number of documents" - }, - "size":{ - 
"type":"int", - "description":"The default number of documents returned in queries as a string." - }, - "start":{ - "type":"date", - "description":"The filter 'start' query parameter" - }, - "end":{ - "type":"date", - "description":"The filter 'end' query parameter" - }, - "sort":{ - "type":"string", - "description":"Name of the field to sort on" - }, - "desc":{ - "type":"boolean", - "description":"True if the results should be sorted in descending order" - } - }, - "body":{ - "description":"Model snapshot selection criteria" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_overall_buckets.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_overall_buckets.json deleted file mode 100644 index d18e0c1be3dfa..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_overall_buckets.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "xpack-ml.get_overall_buckets":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html", - "description":"Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/overall_buckets", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The job IDs for which to calculate overall bucket results" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "top_n":{ - "type":"int", - "description":"The number of top job bucket scores to be used in the overall_score calculation" - }, - "bucket_span":{ - "type":"string", - "description":"The span of the overall buckets. Defaults to the longest job bucket_span" - }, - "overall_score":{ - "type":"double", - "description":"Returns overall buckets with overall scores higher than this value" - }, - "exclude_interim":{ - "type":"boolean", - "description":"If true overall buckets that include interim buckets will be excluded" - }, - "start":{ - "type":"string", - "description":"Returns overall buckets with timestamps after this time" - }, - "end":{ - "type":"string", - "description":"Returns overall buckets with timestamps earlier than this time" - }, - "allow_no_match":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)" - }, - "allow_no_jobs":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard expression matches no jobs. 
(This includes `_all` string or when no jobs have been specified)", - "deprecated":true - } - }, - "body":{ - "description":"Overall bucket selection details if not provided in URI" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_records.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_records.json deleted file mode 100644 index a930001a0e319..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.get_records.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "xpack-ml.get_records":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html", - "description":"Retrieves anomaly records for an anomaly detection job." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/results/records", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "exclude_interim":{ - "type":"boolean", - "description":"Exclude interim results" - }, - "from":{ - "type":"int", - "description":"skips a number of records" - }, - "size":{ - "type":"int", - "description":"specifies a max number of records to get" - }, - "start":{ - "type":"string", - "description":"Start time filter for records" - }, - "end":{ - "type":"string", - "description":"End time filter for records" - }, - "record_score":{ - "type":"double", - "description":"Returns records with anomaly scores greater or equal than this value" - }, - "sort":{ - "type":"string", - "description":"Sort 
records by a particular field" - }, - "desc":{ - "type":"boolean", - "description":"Set the sort direction" - } - }, - "body":{ - "description":"Record selection criteria" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.info.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.info.json deleted file mode 100644 index a21bf7986ac20..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.info.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "xpack-ml.info":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html", - "description":"Returns defaults and limits used by machine learning." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/info", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.open_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.open_job.json deleted file mode 100644 index e464c3743695e..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.open_job.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-ml.open_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html", - "description":"Opens one or more anomaly detection jobs." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_open", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to open" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.post_calendar_events.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.post_calendar_events.json deleted file mode 100644 index ca754b25a0d06..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.post_calendar_events.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-ml.post_calendar_events":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html", - "description":"Posts scheduled events in a calendar." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}/events", - "methods":[ - "POST" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to modify" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"A list of events", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.post_data.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.post_data.json deleted file mode 100644 index 21d4687e54119..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.post_data.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "xpack-ml.post_data":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html", - "description":"Sends data to an anomaly detection job for analysis." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/x-ndjson", "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_data", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The name of the job receiving the data" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "reset_start":{ - "type":"string", - "description":"Optional parameter to specify the start of the bucket resetting range" - }, - "reset_end":{ - "type":"string", - "description":"Optional parameter to specify the end of the bucket resetting range" - } - }, - "body":{ - "description":"The data to process", - "required":true, - "serialize":"bulk" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.preview_datafeed.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.preview_datafeed.json deleted file mode 100644 index c14869ff12426..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.preview_datafeed.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "xpack-ml.preview_datafeed":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html", - "description":"Previews a datafeed." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}/_preview", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeed to preview" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/ml/datafeeds/_preview", - "methods":[ - "GET", - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The datafeed config and job config with which to execute the preview", - "required":false - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_calendar.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_calendar.json deleted file mode 100644 index d44674646e71d..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_calendar.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-ml.put_calendar":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html", - "description":"Instantiates a calendar." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}", - "methods":[ - "PUT" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to create" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The calendar details", - "required":false - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_calendar_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_calendar_job.json deleted file mode 100644 index 3351ba74cf961..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_calendar_job.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "xpack-ml.put_calendar_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html", - "description":"Adds an anomaly detection job to a calendar." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/calendars/{calendar_id}/jobs/{job_id}", - "methods":[ - "PUT" - ], - "parts":{ - "calendar_id":{ - "type":"string", - "description":"The ID of the calendar to modify" - }, - "job_id":{ - "type":"string", - "description":"The ID of the job to add to the calendar" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_datafeed.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_datafeed.json deleted file mode 100644 index 3e2700899214b..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_datafeed.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "xpack-ml.put_datafeed":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html", - "description":"Instantiates a datafeed." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}", - "methods":[ - "PUT" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeed to create" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The datafeed config", - "required":true - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Ignore unavailable indexes (default: false)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Ignore if the source indices expressions resolves to no concrete indices (default: true)" - }, - "ignore_throttled":{ - "type":"boolean", - "description":"Ignore indices that are marked as throttled (default: true)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "description":"Whether source index expressions should get expanded to open or closed indices (default: open)" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_filter.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_filter.json deleted file mode 100644 index a36f748bb6bb5..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_filter.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-ml.put_filter":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html", - "description":"Instantiates a filter." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/filters/{filter_id}", - "methods":[ - "PUT" - ], - "parts":{ - "filter_id":{ - "type":"string", - "description":"The ID of the filter to create" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The filter details", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_job.json deleted file mode 100644 index 3c1798170969c..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.put_job.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "xpack-ml.put_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html", - "description":"Instantiates an anomaly detection job." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}", - "methods":[ - "PUT" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to create" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Ignore unavailable indexes (default: false). Only set if datafeed_config is provided." 
- }, - "allow_no_indices":{ - "type":"boolean", - "description":"Ignore if the source indices expressions resolves to no concrete indices (default: true). Only set if datafeed_config is provided." - }, - "ignore_throttled":{ - "type":"boolean", - "description":"Ignore indices that are marked as throttled (default: true). Only set if datafeed_config is provided." - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "description":"Whether source index expressions should get expanded to open or closed indices (default: open). Only set if datafeed_config is provided." - } - }, - "body":{ - "description":"The job", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.revert_model_snapshot.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.revert_model_snapshot.json deleted file mode 100644 index 803595f9ec0b2..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.revert_model_snapshot.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "xpack-ml.revert_model_snapshot":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html", - "description":"Reverts to a specific snapshot." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_revert", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to fetch" - }, - "snapshot_id":{ - "type":"string", - "description":"The ID of the snapshot to revert to" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "delete_intervening_results":{ - "type":"boolean", - "description":"Should we reset the results back to the time of the snapshot?" - } - }, - "body":{ - "description":"Reversion options" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.set_upgrade_mode.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.set_upgrade_mode.json deleted file mode 100644 index 1967d8a6262fd..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.set_upgrade_mode.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "xpack-ml.set_upgrade_mode":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html", - "description":"Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/set_upgrade_mode", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "enabled":{ - "type":"boolean", - "description":"Whether to enable upgrade_mode ML setting or not. Defaults to false." - }, - "timeout":{ - "type":"time", - "description":"Controls the time to wait before action times out. Defaults to 30 seconds" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.start_datafeed.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.start_datafeed.json deleted file mode 100644 index cef069eaa153d..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.start_datafeed.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "xpack-ml.start_datafeed":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html", - "description":"Starts one or more datafeeds." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}/_start", - "methods":[ - "POST" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeed to start" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "start":{ - "type":"string", - "required":false, - "description":"The start time from where the datafeed should begin" - }, - "end":{ - "type":"string", - "required":false, - "description":"The end time when the datafeed should stop. When not set, the datafeed continues in real time" - }, - "timeout":{ - "type":"time", - "required":false, - "description":"Controls the time to wait until a datafeed has started. Default to 20 seconds" - } - }, - "body":{ - "description":"The start datafeed parameters" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.stop_datafeed.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.stop_datafeed.json deleted file mode 100644 index b09d536f05b36..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.stop_datafeed.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "xpack-ml.stop_datafeed":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html", - "description":"Stops one or more datafeeds." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}/_stop", - "methods":[ - "POST" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeed to stop" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "allow_no_match":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)" - }, - "allow_no_datafeeds":{ - "type":"boolean", - "required":false, - "description":"Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)", - "deprecated":true - }, - "force":{ - "type":"boolean", - "required":false, - "description":"True if the datafeed should be forcefully stopped." - }, - "timeout":{ - "type":"time", - "required":false, - "description":"Controls the time to wait until a datafeed has stopped. 
Default to 20 seconds" - } - }, - "body":{ - "description":"The URL params optionally sent in the body", - "required":false - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_datafeed.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_datafeed.json deleted file mode 100644 index ccddff3dd011b..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_datafeed.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "xpack-ml.update_datafeed":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html", - "description":"Updates certain properties of a datafeed." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/datafeeds/{datafeed_id}/_update", - "methods":[ - "POST" - ], - "parts":{ - "datafeed_id":{ - "type":"string", - "description":"The ID of the datafeed to update" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The datafeed update settings", - "required":true - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Ignore unavailable indexes (default: false)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Ignore if the source indices expressions resolves to no concrete indices (default: true)" - }, - "ignore_throttled":{ - "type":"boolean", - "description":"Ignore indices that are marked as throttled (default: true)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "description":"Whether source index expressions should get expanded to 
open or closed indices (default: open)" - } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_filter.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_filter.json deleted file mode 100644 index 00e58675e0dbe..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_filter.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-ml.update_filter":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html", - "description":"Updates the description of a filter, adds items, or removes items." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/filters/{filter_id}/_update", - "methods":[ - "POST" - ], - "parts":{ - "filter_id":{ - "type":"string", - "description":"The ID of the filter to update" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The filter update", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_job.json deleted file mode 100644 index 62e79020ac764..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_job.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-ml.update_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html", - "description":"Updates certain properties of an anomaly detection job." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/_update", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to create" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The job update settings", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_model_snapshot.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_model_snapshot.json deleted file mode 100644 index cd72375d6ae4a..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.update_model_snapshot.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "xpack-ml.update_model_snapshot":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html", - "description":"Updates certain properties of a snapshot." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_update", - "methods":[ - "POST" - ], - "parts":{ - "job_id":{ - "type":"string", - "description":"The ID of the job to fetch" - }, - "snapshot_id":{ - "type":"string", - "description":"The ID of the snapshot to update" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{}, - "body":{ - "description":"The model snapshot properties to update", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json deleted file mode 100644 index ad337c3c1ad82..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-ml.validate":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html", - "description":"Validates an anomaly detection job." 
- }, - "stability":"stable", - "visibility":"private", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/_validate", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{}, - "body":{ - "description":"The job config", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json deleted file mode 100644 index 5a06df8977dfc..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-ml.validate_detector":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html", - "description":"Validates an anomaly detection detector." 
- }, - "stability":"stable", - "visibility":"private", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ml/anomaly_detectors/_validate/detector", - "methods":[ - "POST" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{}, - "body":{ - "description":"The detector", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-monitoring.bulk.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-monitoring.bulk.json deleted file mode 100644 index bf37993fb7862..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-monitoring.bulk.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "xpack-monitoring.bulk":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html", - "description":"Used by the monitoring features to send monitoring data." 
- }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/x-ndjson;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/monitoring/_bulk", - "methods":[ - "POST", - "PUT" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "system_id":{ - "type":"string", - "description":"Identifier of the monitored system" - }, - "system_api_version":{ - "type":"string", - "description":"API Version of the monitored system" - }, - "interval":{ - "type":"string", - "description":"Collection interval (e.g., '10s' or '10000ms') of the payload" - } - }, - "body":{ - "description":"The operation definition and data (action-data pairs), separated by newlines", - "required":true, - "serialize":"bulk" - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.delete_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.delete_job.json deleted file mode 100644 index 3c23fddd7e4fa..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.delete_job.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-rollup.delete_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html", - "description":"Deletes an existing rollup job." 
- }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/rollup/job/{id}", - "methods":[ - "DELETE" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The ID of the job to delete" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_jobs.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_jobs.json deleted file mode 100644 index 6b98664cf17ee..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_jobs.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "xpack-rollup.get_jobs":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html", - "description":"Retrieves the configuration, stats, and status of rollup jobs." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/rollup/job/{id}", - "methods":[ - "GET" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The ID of the job(s) to fetch. 
Accepts glob patterns, or left blank for all jobs" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/rollup/job/", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_rollup_caps.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_rollup_caps.json deleted file mode 100644 index a8b95d00b6c3b..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_rollup_caps.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "xpack-rollup.get_rollup_caps":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html", - "description":"Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern." 
- }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/rollup/data/{id}", - "methods":[ - "GET" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The ID of the index to check rollup capabilities on, or left blank for all jobs" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - }, - { - "path":"/_xpack/rollup/data/", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_rollup_index_caps.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_rollup_index_caps.json deleted file mode 100644 index 38c9af891addb..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.get_rollup_index_caps.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-rollup.get_rollup_index_caps":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html", - "description":"Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored)." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/{index}/_xpack/rollup/data", - "methods":[ - "GET" - ], - "parts":{ - "index":{ - "type":"string", - "description":"The rollup index or index pattern to obtain rollup capabilities from." 
- } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.put_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.put_job.json deleted file mode 100644 index 865a6f448193f..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.put_job.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "xpack-rollup.put_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html", - "description":"Creates a rollup job." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/rollup/job/{id}", - "methods":[ - "PUT" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The ID of the job to create" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "body":{ - "description":"The job configuration", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.start_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.start_job.json deleted file mode 100644 index aebaf859ce9e2..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.start_job.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "xpack-rollup.start_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html", - "description":"Starts 
an existing, stopped rollup job." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/rollup/job/{id}/_start", - "methods":[ - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The ID of the job to start" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.stop_job.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.stop_job.json deleted file mode 100644 index 62eada2313f2a..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-rollup.stop_job.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "xpack-rollup.stop_job":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html", - "description":"Stops an existing, started rollup job." - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/rollup/job/{id}/_stop", - "methods":[ - "POST" - ], - "parts":{ - "id":{ - "type":"string", - "description":"The ID of the job to stop" - } - }, - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{ - "wait_for_completion":{ - "type":"boolean", - "required":false, - "description":"True if the API should block until the job has fully stopped, false if should be executed async. Defaults to false." - }, - "timeout":{ - "type":"time", - "required":false, - "description":"Block for (at maximum) the specified duration while waiting for the job to stop. Defaults to 30s." 
- } - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.clear_cursor.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.clear_cursor.json deleted file mode 100644 index ee706fc0736cd..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.clear_cursor.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "xpack-sql.clear_cursor":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-sql-cursor-api.html", - "description":"Clears the SQL cursor" - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/sql/close", - "methods":[ - "POST" - ] - } - ] - }, - "body":{ - "description":"Specify the cursor value in the `cursor` element to clean the cursor.", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.query.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.query.json deleted file mode 100644 index d153e0f3484c3..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.query.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "xpack-sql.query":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-search-api.html", - "description":"Executes a SQL request" - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - 
"path":"/_xpack/sql", - "methods":[ - "POST", - "GET" - ] - } - ] - }, - "params":{ - "format":{ - "type":"string", - "description":"a short version of the Accept header, e.g. json, yaml" - } - }, - "body":{ - "description":"Use the `query` element to start a query. Use the `cursor` element to continue a query.", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.translate.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.translate.json deleted file mode 100644 index 1fad4a643dbb9..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-sql.translate.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "xpack-sql.translate":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate-api.html", - "description":"Translates SQL into Elasticsearch queries" - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/sql/translate", - "methods":[ - "POST", - "GET" - ] - } - ] - }, - "params":{}, - "body":{ - "description":"Specify the query in the `query` element.", - "required":true - } - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ssl.certificates.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ssl.certificates.json deleted file mode 100644 index 7d25b0bf8f4f3..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ssl.certificates.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "xpack-ssl.certificates":{ - "documentation":{ - 
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html", - "description":"Retrieves information about the X.509 certificates used to encrypt communications in the cluster." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_xpack/ssl/certificates", - "methods":[ - "GET" - ], - "deprecated":{ - "version":"7.0.0", - "description":"all _xpack prefix have been deprecated" - } - } - ] - }, - "params":{} - } -} diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/test/monitoring.bulk/10_basic_compat.yml b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/test/monitoring.bulk/10_basic_compat.yml deleted file mode 100644 index a88f67b394783..0000000000000 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/test/monitoring.bulk/10_basic_compat.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings_regex" - ---- -"Bulk indexing of monitoring data": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - xpack-monitoring.bulk: - system_id: "kibana" - system_api_version: "6" - interval: "10s" - body: - - index: - _type: test_type - - avg-cpu: - user: 13.26 - nice: 0.17 - system: 1.51 - iowait: 0.85 - idle: 84.20 - warnings_regex: - - "\\[.* /_xpack/monitoring/_bulk\\] is deprecated! Use \\[.* /_monitoring/bulk\\] instead." - - - is_false: errors