From 7599c051f366be648d4a3f53e687707b8da7bb54 Mon Sep 17 00:00:00 2001
From: Rudolf Meijering
Date: Thu, 14 Jan 2021 13:37:55 +0100
Subject: [PATCH 1/3] Handle another node already having deleted the temporary index

---
 .../integration_tests/parallel.sh            | 85 +++++++++++++++++++
 .../saved_objects/migrationsv2/model.test.ts | 12 ++-
 .../saved_objects/migrationsv2/model.ts      | 20 +++--
 3 files changed, 110 insertions(+), 7 deletions(-)
 create mode 100755 src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh

diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh b/src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh
new file mode 100755
index 0000000000000..9dc6ff48463f8
--- /dev/null
+++ b/src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh
@@ -0,0 +1,85 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#!/bin/bash
+
+#
+# Script to run multiple kibana instances in parallel.
+# Make sure to run the script from kibana root directory.
+#
+# NOTE: This is not run during CI but helps with manual testing
+#
+# bash parallel.sh [options]
+# functions:
+#   start [instances] - start multiple kibanas (3 default)
+#   es - run elasticsearch with 7.7.2 snapshot data
+#   tail - show logs of all kibanas
+#   kill - kills all started kibana processes
+#   clean - clean up nohup files
+#   kibana_index - search .kibana index against es
+#
+
+FN="$1"
+NUM="$2"
+
+if [ "${FN}" == "kill" ]; then
+  echo "killing main processes"
+  for pid in $(cat processes.out); do kill -9 $pid; done
+  echo "killing trailing processes"
+  for pid in $(pgrep -f scripts/kibana); do kill -9 $pid; done
+  exit 0;
+fi
+
+if [ "${FN}" == "tail" ]; then
+  tail -f nohup_*
+  exit 0;
+fi
+
+if [ "${FN}" == "clean" ]; then
+  rm -r nohup_*.out
+  rm processes.out
+  exit 0;
+fi
+
+if [ "${FN}" == "es" ]; then
+  yarn es snapshot --data-archive=src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.7.2_xpack_100k_obj.zip
+  exit 0;
+fi
+
+if [ "${FN}" == "kibana_index" ]; then
+  # search the kibana index
+  curl -XPOST http://elastic:changeme@localhost:9200/.kibana/_search -u elastic:changeme -d '' | jq
+  exit 0;
+fi
+
+if [ "${FN}" == "start" ]; then
+  if test ! "${NUM-}"; then
+    NUM=3
+  fi
+  node scripts/build_kibana_platform_plugins --no-examples
+  rm processes.out
+  for i in $(seq 0 $(expr $NUM - 1))
+  do
+    PORT="56${i}1"
+    PROXY="56${i}3"
+    echo "starting kibana on port $PORT"
+    nohup node scripts/kibana.js --dev.basePathProxyTarget=$PROXY --server.port=$PORT --migrations.enableV2=true --dev --no-watch --no-optimizer > nohup_$i.out &
+    PROCESS_ID=$!
+    echo "${PROCESS_ID}" >> processes.out
+  done
+  exit 0;
+fi
diff --git a/src/core/server/saved_objects/migrationsv2/model.test.ts b/src/core/server/saved_objects/migrationsv2/model.test.ts
index ab7c47389f539..ce20d2677f8e5 100644
--- a/src/core/server/saved_objects/migrationsv2/model.test.ts
+++ b/src/core/server/saved_objects/migrationsv2/model.test.ts
@@ -922,7 +922,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
-    test('MARK_VERSION_INDEX_READY -> MARK_VERSION_INDEX_CONFLICT if someone else removed the current alias from the source index', () => {
+    test('MARK_VERSION_INDEX_READY -> MARK_VERSION_INDEX_CONFLICT if another removed the current alias from the source index', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.left({
         type: 'alias_not_found_exception',
       });
@@ -931,6 +931,16 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+    test('MARK_VERSION_INDEX_READY -> MARK_VERSION_INDEX_CONFLICT if another node removed the temporary index', () => {
+      const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.left({
+        type: 'index_not_found_exception',
+        index: '.kibana_7.11.0_reindex_temp',
+      });
+      const newState = model(markVersionIndexReadyState, res);
+      expect(newState.controlState).toEqual('MARK_VERSION_INDEX_READY_CONFLICT');
+      expect(newState.retryCount).toEqual(0);
+      expect(newState.retryDelay).toEqual(0);
+    });
   });
   describe('MARK_VERSION_INDEX_READY_CONFLICT', () => {
     const aliasActions = Option.some([Symbol('alias action')] as unknown) as Option.Some<
diff --git a/src/core/server/saved_objects/migrationsv2/model.ts b/src/core/server/saved_objects/migrationsv2/model.ts
index 5fb4c3e29538f..ae3760caf51a2 100644
--- a/src/core/server/saved_objects/migrationsv2/model.ts
+++ b/src/core/server/saved_objects/migrationsv2/model.ts
@@ -648,12 +648,20 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
         // alias_not_found_exception another instance has completed a
         // migration from the same source.
         return { ...stateP, controlState: 'MARK_VERSION_INDEX_READY_CONFLICT' };
-      } else if (
-        left.type === 'remove_index_not_a_concrete_index' ||
-        left.type === 'index_not_found_exception'
-      ) {
-        // We don't handle these errors as the migration algorithm will never
-        // cause them to occur (these are only relevant to the LEGACY_DELETE
+      } else if (left.type === 'index_not_found_exception') {
+        if (left.index === stateP.tempIndex) {
+          // another instance has already completed the migration and deleted
+          // the temporary index
+          return { ...stateP, controlState: 'MARK_VERSION_INDEX_READY_CONFLICT' };
+        } else {
+          // The migration algorithm will never cause a
+          // index_not_found_exception for an index other than the temporary
+          // index handled above.
+          throwBadResponse(stateP, left as never);
+        }
+      } else if (left.type === 'remove_index_not_a_concrete_index') {
+        // We don't handle this error as the migration algorithm will never
+        // cause it to occur (this error is only relevant to the LEGACY_DELETE
         // step).
         throwBadResponse(stateP, left as never);
       } else {

From 3fb008870cf93d44d20fdd08cadec85d29c6a20f Mon Sep 17 00:00:00 2001
From: Rudolf Meijering
Date: Fri, 15 Jan 2021 14:24:52 +0100
Subject: [PATCH 2/3] Make run_multiple_kibana_nodes.sh script more generic

---
 .../scripts/run_multiple_kibana_nodes.sh | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
 rename src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh => test/scripts/run_multiple_kibana_nodes.sh (80%)

diff --git a/src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh b/test/scripts/run_multiple_kibana_nodes.sh
similarity index 80%
rename from src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh
rename to test/scripts/run_multiple_kibana_nodes.sh
index 9dc6ff48463f8..b62e6252da3cd 100755
--- a/src/core/server/saved_objects/migrationsv2/integration_tests/parallel.sh
+++ b/test/scripts/run_multiple_kibana_nodes.sh
@@ -18,15 +18,13 @@
 #!/bin/bash
 
 #
-# Script to run multiple kibana instances in parallel.
+# Script to run multiple kibana nodes in parallel on the same machine.
 # Make sure to run the script from kibana root directory.
 #
-# NOTE: This is not run during CI but helps with manual testing
-#
-# bash parallel.sh [options]
+# bash test/scripts/run_multiple_kibana_nodes.sh [options]
 # functions:
-#   start [instances] - start multiple kibanas (3 default)
-#   es - run elasticsearch with 7.7.2 snapshot data
+#   start [instances] [args] - start multiple kibanas (3 default)
+#   es [args] - run elasticsearch
 #   tail - show logs of all kibanas
 #   kill - kills all started kibana processes
 #   clean - clean up nohup files
@@ -34,7 +32,6 @@
 #
 
 FN="$1"
-NUM="$2"
 
 if [ "${FN}" == "kill" ]; then
   echo "killing main processes"
@@ -56,7 +53,8 @@ if [ "${FN}" == "clean" ]; then
 fi
 
 if [ "${FN}" == "es" ]; then
-  yarn es snapshot --data-archive=src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.7.2_xpack_100k_obj.zip
+  ARGS="$2"
+  yarn es snapshot $ARGS
   exit 0;
 fi
 
@@ -67,6 +65,8 @@ if [ "${FN}" == "kibana_index" ]; then
 fi
 
 if [ "${FN}" == "start" ]; then
+  NUM="$2"
+  ARGS="$3"
   if test ! "${NUM-}"; then
     NUM=3
   fi
@@ -77,7 +77,7 @@ if [ "${FN}" == "start" ]; then
     PORT="56${i}1"
     PROXY="56${i}3"
     echo "starting kibana on port $PORT"
-    nohup node scripts/kibana.js --dev.basePathProxyTarget=$PROXY --server.port=$PORT --migrations.enableV2=true --dev --no-watch --no-optimizer > nohup_$i.out &
+    nohup node scripts/kibana.js --dev.basePathProxyTarget=$PROXY --server.port=$PORT --dev --no-watch --no-optimizer --no-base-path $ARGS > nohup_$i.out &
     PROCESS_ID=$!
     echo "${PROCESS_ID}" >> processes.out
   done

From aa5290067ab90e3d38274ee928828c6352fe7fb9 Mon Sep 17 00:00:00 2001
From: Rudolf Meijering
Date: Thu, 21 Jan 2021 10:27:15 +0100
Subject: [PATCH 3/3] Add note about dependency on jq

---
 test/scripts/run_multiple_kibana_nodes.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/scripts/run_multiple_kibana_nodes.sh b/test/scripts/run_multiple_kibana_nodes.sh
index b62e6252da3cd..f5661c19bed11 100755
--- a/test/scripts/run_multiple_kibana_nodes.sh
+++ b/test/scripts/run_multiple_kibana_nodes.sh
@@ -19,7 +19,8 @@
 
 #
 # Script to run multiple kibana nodes in parallel on the same machine.
-# Make sure to run the script from kibana root directory.
+# Make sure to run the script from kibana root directory. Some functions depend on the jq command-line utility
+# being installed.
 #
 # bash test/scripts/run_multiple_kibana_nodes.sh [options]
 # functions:
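
Example usage of the renamed script after these patches, pieced together from its own usage comment: the data archive passed to `es` and the `--migrations.enableV2=true` flag passed to `start` are taken from the pre-rename version in patch 1 and are only illustrative of how the new [args] parameters can be used.

  # start Elasticsearch; any extra `yarn es snapshot` args are passed through
  bash test/scripts/run_multiple_kibana_nodes.sh es "--data-archive=src/core/server/saved_objects/migrationsv2/integration_tests/archives/7.7.2_xpack_100k_obj.zip"

  # start 3 Kibana nodes, forwarding extra CLI args to scripts/kibana.js
  bash test/scripts/run_multiple_kibana_nodes.sh start 3 "--migrations.enableV2=true"

  # follow the logs, inspect the .kibana index (requires jq), then stop and clean up
  bash test/scripts/run_multiple_kibana_nodes.sh tail
  bash test/scripts/run_multiple_kibana_nodes.sh kibana_index
  bash test/scripts/run_multiple_kibana_nodes.sh kill
  bash test/scripts/run_multiple_kibana_nodes.sh clean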