From fe06eddbf2434adbdd4cc08ac8b0baee9e51593c Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 25 Feb 2024 21:30:44 +0100 Subject: [PATCH] Refactor update test Currently the update test is quite inconvenient to run locally and also inconvenient to debug as the different update tests all run in their own docker container. This patch refactors the update test to no longer require docker and make it easier to debug as it will run in the local environment as determined by pg_config. This patch also consolidates update/downgrade and repair test since they do very similar things and adds support for coredump stacktraces to the github action and removes some dead code from the update tests. Additionally the versions to be used in the update test are now determined from existing git tags so the post release patch no longer needs to add newly released versions. --- .github/workflows/update-test.yaml | 117 +++++++------ .gitignore | 3 +- scripts/test_downgrade.sh | 46 +++++ scripts/test_downgrade_from_tag.sh | 255 ---------------------------- scripts/test_functions.inc | 48 ------ scripts/test_repair_from_tag.sh | 126 -------------- scripts/test_repairs.sh | 50 ------ scripts/test_update_from_tag.sh | 228 ------------------------- scripts/test_update_from_version.sh | 137 +++++++++++++++ scripts/test_updates.sh | 200 ++++++++++++---------- scripts/test_updates_pg13.sh | 31 ---- scripts/test_updates_pg14.sh | 30 ---- scripts/test_updates_pg15.sh | 26 --- scripts/test_updates_pg16.sh | 19 --- test/sql/updates/README.md | 23 --- test/sql/updates/post.repair.sql | 24 +-- test/sql/updates/setup.repair.sql | 140 +-------------- 17 files changed, 358 insertions(+), 1145 deletions(-) create mode 100755 scripts/test_downgrade.sh delete mode 100755 scripts/test_downgrade_from_tag.sh delete mode 100644 scripts/test_functions.inc delete mode 100755 scripts/test_repair_from_tag.sh delete mode 100755 scripts/test_repairs.sh delete mode 100755 scripts/test_update_from_tag.sh create 
mode 100755 scripts/test_update_from_version.sh delete mode 100755 scripts/test_updates_pg13.sh delete mode 100755 scripts/test_updates_pg14.sh delete mode 100755 scripts/test_updates_pg15.sh delete mode 100755 scripts/test_updates_pg16.sh delete mode 100644 test/sql/updates/README.md diff --git a/.github/workflows/update-test.yaml b/.github/workflows/update-test.yaml index daf7d482057..b4496f3fbdb 100644 --- a/.github/workflows/update-test.yaml +++ b/.github/workflows/update-test.yaml @@ -6,24 +6,12 @@ name: Test Update and Downgrade - prerelease_test pull_request: jobs: - config: - runs-on: ubuntu-latest - outputs: - pg_latest: ${{ steps.setter.outputs.PG_LATEST }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - - name: Read configuration - id: setter - run: python .github/gh_config_reader.py - update_test: name: Update test PG${{ matrix.pg }} runs-on: 'ubuntu-latest' - needs: config strategy: matrix: - pg: ${{ fromJson(needs.config.outputs.pg_latest) }} + pg: [13, 14, 15, 16] fail-fast: false env: PG_VERSION: ${{ matrix.pg }} @@ -32,63 +20,74 @@ jobs: - name: Checkout TimescaleDB uses: actions/checkout@v4 - - name: Update tests ${{ matrix.pg }} + - name: Install Dependencies run: | - PG_MAJOR=$(echo "${{ matrix.pg }}" | sed -e 's![.].*!!') - ./scripts/test_updates_pg${PG_MAJOR}.sh + sudo apt-get update + sudo apt-get install gnupg systemd-coredump gdb postgresql-common libkrb5-dev + yes | sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh + echo "deb https://packagecloud.io/timescale/timescaledb/ubuntu/ $(lsb_release -c -s) main" | sudo tee /etc/apt/sources.list.d/timescaledb.list + wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | sudo apt-key add - + sudo apt-get update + sudo apt-get install postgresql-${{ matrix.pg }} postgresql-server-dev-${{ matrix.pg }} + sudo apt-get install -y --no-install-recommends timescaledb-2-postgresql-${{ matrix.pg }} + git fetch --tags - - name: Update diff - if: failure() + 
- name: Update tests PG${{ matrix.pg }} run: | - find . -name "update_test.*.diff.*" -maxdepth 1 | xargs -IFILE sh -c "echo '\nFILE\n';cat FILE" - - - name: Upload Artifacts - if: failure() - uses: actions/upload-artifact@v4 - with: - name: Extension update diff ${{ matrix.pg }} - path: update_test.*.diff.* + PATH="/usr/lib/postgresql/${{ matrix.pg }}/bin:$PATH" + ./scripts/test_updates.sh - downgrade_test: - name: Downgrade test PG${{ matrix.pg }} - runs-on: 'ubuntu-latest' - needs: config - strategy: - matrix: - pg: ${{ fromJson(needs.config.outputs.pg_latest) }} - fail-fast: false - env: - PG_VERSION: ${{ matrix.pg }} - POSTGRES_HOST_AUTH_METHOD: trust - GENERATE_DOWNGRADE_SCRIPT: ON - steps: - - name: Checkout TimescaleDB - uses: actions/checkout@v4 + - name: Downgrade tests PG${{ matrix.pg }} + if: always() + run: | + PATH="/usr/lib/postgresql/${{ matrix.pg }}/bin:$PATH" + ./scripts/test_downgrade.sh - # We need the tags to be able to build a downgrade script. - - name: Fetch all tags - run: git fetch --tags && git tag + - name: Update diff + if: failure() + run: | + find update_test -name "*.diff" | xargs -IFILE sh -c "echo '\nFILE\n';cat FILE" - - name: Downgrade tests ${{ matrix.pg }} - env: - TEST_VERSION: v7 + - name: Check for coredumps + if: always() + id: collectlogs run: | - DOWNGRADE_TO=$(grep '^downgrade_to_version = ' version.config | sed -e 's!^[^=]\+ = !!') - PG_MAJOR=$(echo "${{ matrix.pg }}" | sed -e 's![.].*!!') - UPDATE_FROM_TAG=${DOWNGRADE_TO}-pg${PG_MAJOR} - export UPDATE_FROM_TAG - # We need to use same libssl version used in the latest official TimescaleDB container images. - # So we will use the fixed alpine version, this will guarantee that libssl version won't change. 
- PG_IMAGE_TAG="${PG_VERSION}-alpine3.18" scripts/test_downgrade_from_tag.sh + # wait for in progress coredumps + sleep 10 + if coredumpctl list; then + echo "coredumps=true" >>$GITHUB_OUTPUT + false + fi - - name: Downgrade diff - if: failure() + - name: Stack trace + if: always() && steps.collectlogs.outputs.coredumps == 'true' run: | - find . -name "downgrade_test.*.diff.*" -maxdepth 1 | xargs -IFILE sh -c "echo '\nFILE\n';cat FILE" + sudo coredumpctl gdb <<<" + set verbose on + set trace-commands on + show debug-file-directory + printf "'"'"query = '%s'\n\n"'"'", (char *) debug_query_string + frame function ExceptionalCondition + printf "'"'"condition = '%s'\n"'"'", (char *) conditionName + up 1 + l + info args + info locals + bt full + " 2>&1 | tee stacktrace.log + ./scripts/bundle_coredumps.sh + false + + - name: Upload Coredumps + if: always() && steps.collectlogs.outputs.coredumps == 'true' + uses: actions/upload-artifact@v4 + with: + name: Coredumps sqlsmith ${{ matrix.os }} PG${{ matrix.pg }} + path: coredumps - name: Upload Artifacts if: failure() uses: actions/upload-artifact@v4 with: - name: Extension downgrade diff ${{ matrix.pg }} - path: downgrade_test.*.diff.* + name: Update test PG${{ matrix.pg }} + path: update_test diff --git a/.gitignore b/.gitignore index 12bb1c6864b..2a13ea077a8 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,8 @@ typedef.list /test/testcluster /test/log /test/temp_schedule -/build +/build* +/update_test **/GPATH **/GTAGS **/GRTAGS diff --git a/scripts/test_downgrade.sh b/scripts/test_downgrade.sh new file mode 100755 index 00000000000..64ce431a913 --- /dev/null +++ b/scripts/test_downgrade.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -e + +SCRIPT_DIR=$(dirname $0) +PG_MAJOR_VERSION=$(pg_config --version | awk '{print $2}' | awk -F. 
'{print $1}') + +PG_EXTENSION_DIR=$(pg_config --sharedir)/extension +if [ "${CI:-false}" == true ]; then + GIT_REF=${GIT_REF:-$(git rev-parse HEAD)} +else + GIT_REF=$(git branch --show-current) +fi + + +BUILD_DIR="build_update_pg${PG_MAJOR_VERSION}" + +CURRENT_VERSION=$(grep '^version ' version.config | awk '{ print $3 }') +PREV_VERSION=$(grep '^downgrade_to_version ' version.config | awk '{ print $3 }') + +if [ ! -d "${BUILD_DIR}" ]; then + echo "Initializing build directory" + BUILD_DIR="${BUILD_DIR}" ./bootstrap -DCMAKE_BUILD_TYPE=Release -DWARNINGS_AS_ERRORS=OFF -DASSERTIONS=ON -DLINTER=ON -DGENERATE_DOWNGRADE_SCRIPT=ON -DREGRESS_CHECKS=OFF -DTAP_CHECKS=OFF +fi + +if [ ! -f "${PG_EXTENSION_DIR}/timescaledb--${PREV_VERSION}.sql" ]; then + echo "Building ${PREV_VERSION}" + git checkout ${PREV_VERSION} + make -C "${BUILD_DIR}" -j4 > /dev/null + sudo make -C "${BUILD_DIR}" install > /dev/null + git checkout ${GIT_REF} +fi + +# We want to use the latest loader for all the tests so we build it last +make -C "${BUILD_DIR}" -j4 +sudo make -C "${BUILD_DIR}" install > /dev/null + +set +e + +FROM_VERSION=${CURRENT_VERSION} TO_VERSION=${PREV_VERSION} TEST_VERSION=v8 TEST_REPAIR=false "${SCRIPT_DIR}/test_update_from_version.sh" +return_code=$? +if [ $return_code -ne 0 ]; then + echo -e "\nFailed downgrade from ${CURRENT_VERSION} to ${PREV_VERSION}\n" + exit 1 +fi + diff --git a/scripts/test_downgrade_from_tag.sh b/scripts/test_downgrade_from_tag.sh deleted file mode 100755 index 6ea33848b3f..00000000000 --- a/scripts/test_downgrade_from_tag.sh +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -o pipefail - -SCRIPT_DIR=$(dirname $0) -BASE_DIR=${PWD}/${SCRIPT_DIR}/.. -WITH_SUPERUSER=true # Update tests have superuser privileges when running tests. 
-TEST_VERSION=${TEST_VERSION:-v2} -TEST_TMPDIR=${TEST_TMPDIR:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_downgrade_test' || mkdir -p /tmp/${RANDOM})} -UPDATE_PG_PORT=${UPDATE_PG_PORT:-6432} -CLEAN_PG_PORT=${CLEAN_PG_PORT:-6433} -PG_VERSION=${PG_VERSION:-14.3} -GIT_ID=$(git -C ${BASE_DIR} describe --dirty --always | sed -e "s|/|_|g") -UPDATE_FROM_IMAGE=${UPDATE_FROM_IMAGE:-timescale/timescaledb} -UPDATE_FROM_TAG=${UPDATE_FROM_TAG:-0.1.0} -UPDATE_TO_IMAGE=${UPDATE_TO_IMAGE:-downgrade_test} -UPDATE_TO_TAG=${UPDATE_TO_TAG:-${GIT_ID}} -DO_CLEANUP=${DO_CLEANUP:-true} -PGOPTS="-v TEST_VERSION=${TEST_VERSION} -v TEST_REPAIR=${TEST_REPAIR} -v WITH_SUPERUSER=${WITH_SUPERUSER} -v WITH_ROLES=true -v WITH_CHUNK=true" -GENERATE_DOWNGRADE_SCRIPT=${GENERATE_DOWNGRADE_SCRIPT:-ON} - -# The following variables are exported to called scripts. -export GENERATE_DOWNGRADE_SCRIPT PG_VERSION - -# PID of the current shell -PID=$$ - -# Container names. Append shell PID so that we can run this script in parallel -CONTAINER_ORIG=timescaledb-orig-${PID} -CONTAINER_CLEAN_RESTORE=timescaledb-clean-restore-${PID} -CONTAINER_UPDATED=timescaledb-updated-${PID} -CONTAINER_CLEAN_RERUN=timescaledb-clean-rerun-${PID} - -if [[ "$DO_CLEANUP" = "false" ]]; then - echo "!!Debug mode: Containers and temporary directory will be left on disk" -else - echo "!!Containers and temporary directory will be cleaned up" -fi - -trap cleanup EXIT - -remove_containers() { - docker rm -vf ${CONTAINER_ORIG} 2>/dev/null - docker rm -vf ${CONTAINER_CLEAN_RESTORE} 2>/dev/null - docker rm -vf ${CONTAINER_UPDATED} 2>/dev/null - docker rm -vf ${CONTAINER_CLEAN_RERUN} 2>/dev/null - docker volume rm -f ${CLEAN_VOLUME} 2>/dev/null - docker volume rm -f ${UPDATE_VOLUME} 2>/dev/null -} - -cleanup() { - # Save status here so that we can return the status of the last - # command in the script and not the last command of the cleanup - # function - local status="$?" 
- set +e # do not exit immediately on failure in cleanup handler - if [ "$DO_CLEANUP" = "true" ]; then - rm -rf ${TEST_TMPDIR} - sleep 1 - remove_containers - fi - echo "Test with pid ${PID} exited with code ${status}" - exit ${status} -} - -docker_exec() { - # Echo to stderr - >&2 echo -e "\033[1m$1\033[0m: $2" - docker exec $1 /bin/bash -c "$2" -} - -docker_logs() { - # Echo to stderr - >&2 echo -e "\033[1m$1\033[0m: $2" - docker logs $1 -} - -docker_pgcmd() { - local database=${3:-single} - echo "executing pgcmd on database $database" - set +e - if ! docker_exec $1 "psql -h localhost -U postgres -d $database $PGOPTS -v VERBOSITY=verbose -c \"$2\"" - then - docker_logs $1 - exit 1 - fi - set -e -} - -docker_pgscript() { - local database=${3:-single} - docker_exec $1 "psql --set VERBOSITY=verbose --set ECHO=all -h localhost -U postgres -d $database $PGOPTS -v ON_ERROR_STOP=1 -f $2" -} - -docker_pgtest() { - local database=${3:-single} - set +e - >&2 echo -e "\033[1m$1\033[0m: $2" - if ! docker exec $1 psql -X -v ECHO=ALL -v ON_ERROR_STOP=1 -h localhost -U postgres -d $database $PGOPTS -f $2 > ${TEST_TMPDIR}/$1.out - then - docker_logs $1 - exit 1 - fi - set -e -} - -docker_pgdiff_all() { - local database=${2:-single} - diff_file1=downgrade_test.restored.diff.${UPDATE_FROM_TAG} - diff_file2=downgrade_test.clean.diff.${UPDATE_FROM_TAG} - docker_pgtest ${CONTAINER_UPDATED} $1 $database - docker_pgtest ${CONTAINER_CLEAN_RESTORE} $1 $database - docker_pgtest ${CONTAINER_CLEAN_RERUN} $1 $database - echo "Diffing downgraded container vs restored. Downgraded: ${CONTAINER_UPDATED} restored: ${CONTAINER_CLEAN_RESTORE}" - diff -u ${TEST_TMPDIR}/${CONTAINER_UPDATED}.out ${TEST_TMPDIR}/${CONTAINER_CLEAN_RESTORE}.out | tee ${diff_file1} - if [ ! -s ${diff_file1} ]; then - rm ${diff_file1} - fi - echo "Diffing downgraded container vs clean run. 
Downgraded: ${CONTAINER_UPDATED} clean run: ${CONTAINER_CLEAN_RERUN}" - diff -u ${TEST_TMPDIR}/${CONTAINER_UPDATED}.out ${TEST_TMPDIR}/${CONTAINER_CLEAN_RERUN}.out | tee ${diff_file2} - if [ ! -s ${diff_file2} ]; then - rm ${diff_file2} - fi -} - -docker_run() { - docker run --env TIMESCALEDB_TELEMETRY=off --env POSTGRES_HOST_AUTH_METHOD=trust -d --name $1 -v ${BASE_DIR}:/src $2 -c timezone="GMT" -c max_prepared_transactions=100 - wait_for_pg $1 -} - -docker_run_vol() { - docker run --env TIMESCALEDB_TELEMETRY=off --env POSTGRES_HOST_AUTH_METHOD=trust -d --name $1 -v ${BASE_DIR}:/src -v $2 $3 -c timezone="GMT" -c max_prepared_transactions=100 - wait_for_pg $1 -} - -wait_for_pg() { - set +e - for _ in {1..20}; do - sleep 1 - - if docker_exec $1 "pg_isready -U postgres" - then - # this makes the test less flaky, although not - # ideal. Apparently, pg_isready is not always a good - # indication of whether the DB is actually ready to accept - # queries - sleep 1 - set -e - return 0 - fi - docker_logs $1 - - done - exit 1 -} - -# shellcheck disable=SC2001 # SC2001 -- See if you can use ${variable//search/replace} instead. -VERSION=$(echo ${UPDATE_FROM_TAG} | sed 's/\([0-9]\{0,\}\.[0-9]\{0,\}\.[0-9]\{0,\}\).*/\1/g') -echo "Testing from version ${VERSION} (test version ${TEST_VERSION})" -echo "Using temporary directory ${TEST_TMPDIR}" - -remove_containers || true - -IMAGE_NAME=${UPDATE_TO_IMAGE} TAG_NAME=${UPDATE_TO_TAG} PG_VERSION=${PG_VERSION} bash ${SCRIPT_DIR}/docker-build.sh - -echo "Launching containers" -docker_run ${CONTAINER_ORIG} ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG} -docker_run ${CONTAINER_CLEAN_RESTORE} ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG} -docker_run ${CONTAINER_CLEAN_RERUN} ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG} - -# Create roles for test. Roles must be created outside of regular -# setup scripts; they must be added separately to each instance since -# roles are not dumped by pg_dump. 
-docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.roles.sql "postgres" -docker_pgscript ${CONTAINER_CLEAN_RESTORE} /src/test/sql/updates/setup.roles.sql "postgres" -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.roles.sql "postgres" - -CLEAN_VOLUME=$(docker inspect ${CONTAINER_CLEAN_RESTORE} --format='{{range .Mounts }}{{.Name}}{{end}}') -UPDATE_VOLUME=$(docker inspect ${CONTAINER_ORIG} --format='{{range .Mounts }}{{.Name}}{{end}}') - -echo "Executing setup script on container running ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG}" -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.databases.sql "postgres" -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/pre.testing.sql -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.${TEST_VERSION}.sql -docker_pgcmd ${CONTAINER_ORIG} "CHECKPOINT;" - -# We need the previous version shared libraries as well, so we copy -# all shared libraries out from the original container before stopping -# it. We could limit it to just the preceding version, but this is -# more straightforward. -srcdir=$(docker exec ${CONTAINER_ORIG} /bin/bash -c 'pg_config --pkglibdir') -FILES=$(docker exec ${CONTAINER_ORIG} /bin/bash -c "ls $srcdir/timescaledb*.so") -for file in $FILES; do - docker cp "${CONTAINER_ORIG}:$file" "${TEST_TMPDIR}/$(basename $file)" -done - -# Remove container but keep volume -docker rm -f ${CONTAINER_ORIG} - -echo "Running downgraded container" -docker_run_vol ${CONTAINER_UPDATED} ${UPDATE_VOLUME}:/var/lib/postgresql/data ${UPDATE_TO_IMAGE}:${UPDATE_TO_TAG} - -dstdir=$(docker exec ${CONTAINER_UPDATED} /bin/bash -c 'pg_config --pkglibdir') -for file in $FILES; do - docker cp "${TEST_TMPDIR}/$(basename $file)" "${CONTAINER_UPDATED}:$dstdir" - rm "${TEST_TMPDIR}/$(basename $file)" -done - -echo "==== 1. 
check caggs ====" -docker_pgcmd ${CONTAINER_UPDATED} "SELECT user_view_schema, user_view_name FROM _timescaledb_catalog.continuous_agg" - -echo "Executing ALTER EXTENSION timescaledb UPDATE for update ($UPDATE_FROM_TAG -> $UPDATE_TO_TAG)" -docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE" "single" -# Need to update also postgres DB since add_data_node may connect to -# it and it will be borked if we don't upgrade to an extension binary -# which is available in the image. -docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE" "postgres" - -echo "==== 2. check caggs ====" -docker_pgcmd ${CONTAINER_UPDATED} "SELECT user_view_schema, user_view_name FROM _timescaledb_catalog.continuous_agg" - -# We now assume for some reason the user wanted to downgrade, so we -# downgrade the just upgraded version. -echo "Executing ALTER EXTENSION timescaledb UPDATE for downgrade ($UPDATE_TO_TAG -> $UPDATE_FROM_TAG)" -docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE TO '$VERSION'" "postgres" -docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE TO '$VERSION'" "single" - -# Check that there is nothing wrong before taking a backup -echo "Checking that there are no missing dimension slices" -docker_pgscript ${CONTAINER_UPDATED} /src/test/sql/updates/setup.check.sql - -# Code below is similar to how it works for update scripts, but here -# we run it on the downgraded version instead. 
- -echo "Executing setup script on clean" -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.databases.sql "postgres" -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/pre.testing.sql -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.${TEST_VERSION}.sql -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.post-downgrade.sql - -docker_exec ${CONTAINER_UPDATED} "pg_dump -h localhost -U postgres -Fc single > /tmp/single.dump" -docker cp ${CONTAINER_UPDATED}:/tmp/single.dump ${TEST_TMPDIR}/single.dump - -echo "Restoring database on clean version" -docker cp ${TEST_TMPDIR}/single.dump ${CONTAINER_CLEAN_RESTORE}:/tmp/single.dump - -# Restore single -docker_exec ${CONTAINER_CLEAN_RESTORE} "createdb -h localhost -U postgres single" -docker_pgcmd ${CONTAINER_CLEAN_RESTORE} "ALTER DATABASE single SET timescaledb.restoring='on'" -docker_exec ${CONTAINER_CLEAN_RESTORE} "pg_restore -h localhost -U postgres -d single /tmp/single.dump" -docker_pgcmd ${CONTAINER_CLEAN_RESTORE} "ALTER DATABASE single RESET timescaledb.restoring" - -echo "Comparing downgraded ($UPDATE_TO_TAG -> $UPDATE_FROM_TAG) with clean install ($UPDATE_TO_TAG)" -docker_pgdiff_all /src/test/sql/updates/post.${TEST_VERSION}.sql "single" diff --git a/scripts/test_functions.inc b/scripts/test_functions.inc deleted file mode 100644 index ae8063aa81b..00000000000 --- a/scripts/test_functions.inc +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -SCRIPT_DIR=$(dirname $0) - -# Run tests given as arguments. 
-# -# Options: -# -r Run repair tests (optional) -# -k Keep temporary directory -# -vN Use version N of the update tests (required) -run_tests() ( - export TEST_VERSION - export TEST_REPAIR=false - export DO_CLEANUP=true - - OPTIND=1 - while getopts "kv:r" opt; - do - case $opt in - v) - TEST_VERSION=v$OPTARG - ;; - k) - DO_CLEANUP=false - ;; - r) - TEST_REPAIR=true - ;; - *) - exit 1 - ;; - esac - done - - shift $((OPTIND-1)) - - export TAGS="$@" - if [[ "$TEST_REPAIR" = "true" ]]; then - bash ${SCRIPT_DIR}/test_repairs.sh - else - bash ${SCRIPT_DIR}/test_updates.sh - fi - EXIT_CODE=$? - if [ $EXIT_CODE -ne 0 ]; then - exit $EXIT_CODE - fi -) - diff --git a/scripts/test_repair_from_tag.sh b/scripts/test_repair_from_tag.sh deleted file mode 100755 index 96b1d10ba88..00000000000 --- a/scripts/test_repair_from_tag.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -o pipefail - -SCRIPT_DIR=$(dirname $0) -BASE_DIR=${PWD}/${SCRIPT_DIR}/.. - -GIT_ID=$(git -C ${BASE_DIR} describe --dirty --always | sed -e "s|/|_|g") -WITH_SUPERUSER=true # Update tests have superuser privileges when running tests. 
-UPDATE_FROM_IMAGE=${UPDATE_FROM_IMAGE:-timescale/timescaledb} -UPDATE_FROM_TAG=${UPDATE_FROM_TAG:-0.1.0} -UPDATE_TO_IMAGE=${UPDATE_TO_IMAGE:-update_test} -UPDATE_TO_TAG=${UPDATE_TO_TAG:-${GIT_ID}} - -# Extra options to pass to psql -PGOPTS="-v TEST_VERSION=${TEST_VERSION} -v TEST_REPAIR=true -v WITH_SUPERUSER=${WITH_SUPERUSER} -v WITH_ROLES=false" -PSQL="psql -qX $PGOPTS" - -DOCKEROPTS="--env TIMESCALEDB_TELEMETRY=off --env POSTGRES_HOST_AUTH_METHOD=trust" - -docker_exec() { - # Echo to stderr - >&2 echo -e "\033[1m$1\033[0m: $2" - docker exec $1 /bin/bash -c "$2" -} - -docker_logs() { - # Echo to stderr - >&2 echo -e "\033[1m$1\033[0m: $2" - docker logs $1 -} - -docker_pgcmd() { - local database=single - OPTIND=1 - while getopts "d:" opt; do - case $opt in - d) - database=$OPTARG - ;; - *) - ;; - esac - done - shift $((OPTIND-1)) - - echo "executing pgcmd on database $database with container $1" - set +e - if ! docker_exec $1 "$PSQL -h localhost -U postgres -d $database -v VERBOSITY=verbose -c \"$2\""; then - docker_logs $1 - exit 1 - fi - set -e -} - -docker_pgscript() { - local database=single - OPTIND=1 - while getopts "d:" opt; do - case $opt in - d) - database=$OPTARG - ;; - *) - ;; - esac - done - shift $((OPTIND-1)) - - docker_exec $1 "$PSQL -h localhost -U postgres -d $database -v ON_ERROR_STOP=1 -f $2" -} - -docker_run() { - docker run $DOCKEROPTS -d --name $1 -v ${BASE_DIR}:/src $2 -c timezone='US/Eastern' -c max_prepared_transactions=100 - wait_for_pg $1 -} - -docker_run_vol() { - docker run $DOCKEROPTS -d --name $1 -v ${BASE_DIR}:/src -v $2 $3 -c timezone='US/Eastern' -c max_prepared_transactions=100 - wait_for_pg $1 -} - -wait_for_pg() { - set +e - for _ in {1..20}; do - sleep 1 - - if docker_exec $1 "pg_isready -h localhost -U postgres"; then - # this makes the test less flaky, although not - # ideal. 
Apparently, pg_isready is not always a good - # indication of whether the DB is actually ready to accept - # queries - sleep 5 - set -e - return 0 - fi - docker_logs $1 - - done - exit 1 -} - -CONTAINER_ORIG=timescaledb-orig-$$ -CONTAINER_UPDATED=timescaledb-updated-$$ - -echo "**** Checking repair for update from $UPDATE_FROM_TAG ****" - -# Start a container with the correct version -docker_run ${CONTAINER_ORIG} ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG} - -UPDATE_VOLUME=$(docker inspect ${CONTAINER_ORIG} --format='{{range .Mounts }}{{.Name}}{{end}}') - -docker_pgcmd -d postgres ${CONTAINER_ORIG} "CREATE DATABASE single" -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.repair.sql - -# Remove container but keep volume -docker rm -f ${CONTAINER_ORIG} - -docker_run_vol ${CONTAINER_UPDATED} ${UPDATE_VOLUME}:/var/lib/postgresql/data ${UPDATE_TO_IMAGE}:${UPDATE_TO_TAG} - -docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE" -docker_pgscript ${CONTAINER_UPDATED} /src/test/sql/updates/post.repair.sql - -# Run an integrity check. It will report if any dimension slices are missing. -docker_pgscript ${CONTAINER_UPDATED} /src/test/sql/updates/post.integrity_test.sql diff --git a/scripts/test_repairs.sh b/scripts/test_repairs.sh deleted file mode 100755 index 999bea69952..00000000000 --- a/scripts/test_repairs.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -set -o pipefail - -SCRIPT_DIR=$(dirname $0) -BASE_DIR=${PWD}/${SCRIPT_DIR}/.. 
-GIT_ID=$(git -C ${BASE_DIR} describe --dirty --always | sed -e "s|/|_|g") -TEST_TMPDIR=${TEST_TMPDIR:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_repair_test' || mkdir -p /tmp/${RANDOM})} -UPDATE_TO_IMAGE=${UPDATE_TO_IMAGE:-repair_test} -UPDATE_TO_TAG=${UPDATE_TO_TAG:-${GIT_ID}} - -# Build the docker image with current source here so that the parallel -# tests don't all compete in trying to build it first -IMAGE_NAME=${UPDATE_TO_IMAGE} TAG_NAME=${UPDATE_TO_TAG} PG_VERSION=${PG_VERSION} bash ${SCRIPT_DIR}/docker-build.sh - -# Run repair tests in parallel -declare -A tests -for tag in ${TAGS}; do - UPDATE_FROM_TAG=${tag} TEST_VERSION=${TEST_VERSION} bash $SCRIPT_DIR/test_repair_from_tag.sh ${TEST_UPDATE_FROM_TAGS_EXTRA_ARGS} > ${TEST_TMPDIR}/${tag}.log 2>&1 & - - tests[$!]=${tag} - echo "Launched test ${tag} with pid $!" -done - -# Need to wait on each pid in a loop to return the exit status of each -for pid in "${!tests[@]}"; do - echo "Waiting for test pid $pid" - wait $pid - exit_code=$? - echo "Test ${tests[$pid]} (pid $pid) exited with code $exit_code" - - if [ $exit_code -ne 0 ]; then - FAIL_COUNT=$((FAIL_COUNT + 1)) - FAILED_TEST=${tests[$pid]} - if [ -f ${TEST_TMPDIR}/${FAILED_TEST}.log ]; then - echo "###### Failed $UPDATE_TO_TAG test log below #####" - cat ${TEST_TMPDIR}/${FAILED_TEST}.log - fi - fi - echo "###### test log $UPDATE_TO_TAG below #####" - cat ${TEST_TMPDIR}/${tests[$pid]}.log -done - -if [ "$KEEP_TEMP_DIRS" = "false" ]; then - echo "Cleaning up temporary directory" - rm -rf ${TEST_TMPDIR} -fi - -exit $FAIL_COUNT - diff --git a/scripts/test_update_from_tag.sh b/scripts/test_update_from_tag.sh deleted file mode 100755 index cad39e6ba40..00000000000 --- a/scripts/test_update_from_tag.sh +++ /dev/null @@ -1,228 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -o pipefail - -SCRIPT_DIR=$(dirname $0) -BASE_DIR=${PWD}/${SCRIPT_DIR}/.. -WITH_SUPERUSER=true # Update tests have superuser privileges when running tests. 
-TEST_VERSION=${TEST_VERSION:-v2} -TEST_TMPDIR=${TEST_TMPDIR:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_update_test' || mkdir -p /tmp/${RANDOM})} -UPDATE_PG_PORT=${UPDATE_PG_PORT:-6432} -CLEAN_PG_PORT=${CLEAN_PG_PORT:-6433} -PG_VERSION=${PG_VERSION:-14.3} -GIT_ID=$(git -C ${BASE_DIR} describe --dirty --always | sed -e "s|/|_|g") -UPDATE_FROM_IMAGE=${UPDATE_FROM_IMAGE:-timescale/timescaledb} -UPDATE_FROM_TAG=${UPDATE_FROM_TAG:-0.1.0} -UPDATE_TO_IMAGE=${UPDATE_TO_IMAGE:-update_test} -UPDATE_TO_TAG=${UPDATE_TO_TAG:-${GIT_ID}} -DO_CLEANUP=${DO_CLEANUP:-true} -PGOPTS="-v TEST_VERSION=${TEST_VERSION} -v TEST_REPAIR=${TEST_REPAIR} -v WITH_SUPERUSER=${WITH_SUPERUSER} -v WITH_ROLES=true -v WITH_CHUNK=true" - -# PID of the current shell -PID=$$ - -# Container names. Append shell PID so that we can run this script in parallel -CONTAINER_ORIG=timescaledb-orig-${PID} -CONTAINER_CLEAN_RESTORE=timescaledb-clean-restore-${PID} -CONTAINER_UPDATED=timescaledb-updated-${PID} -CONTAINER_CLEAN_RERUN=timescaledb-clean-rerun-${PID} - -export PG_VERSION - -if [[ "$DO_CLEANUP" = "false" ]]; then - echo "!!Debug mode: Containers and temporary directory will be left on disk" -else - echo "!!Containers and temporary directory will be cleaned up" -fi - -trap cleanup EXIT - -remove_containers() { - docker rm -vf ${CONTAINER_ORIG} 2>/dev/null - docker rm -vf ${CONTAINER_CLEAN_RESTORE} 2>/dev/null - docker rm -vf ${CONTAINER_UPDATED} 2>/dev/null - docker rm -vf ${CONTAINER_CLEAN_RERUN} 2>/dev/null - if [[ -n "${CLEAN_VOLUME}" ]]; then - docker volume rm -f ${CLEAN_VOLUME} 2>/dev/null - fi - if [[ -n "${UPDATE_VOLUME}" ]]; then - docker volume rm -f ${UPDATE_VOLUME} 2>/dev/null - fi -} - -cleanup() { - # Save status here so that we can return the status of the last - # command in the script and not the last command of the cleanup - # function - local status="$?" 
- set +e # do not exit immediately on failure in cleanup handler - if [ "$DO_CLEANUP" = "true" ]; then - rm -rf ${TEST_TMPDIR} - sleep 1 - remove_containers - fi - echo "Test with pid ${PID} exited with code ${status}" - exit ${status} -} - -docker_exec() { - # Echo to stderr - >&2 echo -e "\033[1m$1\033[0m: $2" - docker exec $1 /bin/bash -c "$2" -} - -docker_logs() { - # Echo to stderr - >&2 echo -e "\033[1m$1\033[0m: $2" - docker logs $1 -} - -docker_pgcmd() { - local database=${3:-single} - echo "executing pgcmd on database $database" - set +e - if ! docker_exec $1 "psql -h localhost -U postgres -d $database $PGOPTS -v VERBOSITY=verbose -c \"$2\"" - then - docker_logs $1 - exit 1 - fi - set -e -} - -docker_pgscript() { - local database=${3:-single} - docker_exec $1 "psql --set VERBOSITY=verbose --set ECHO=all -h localhost -U postgres -d $database $PGOPTS -v ON_ERROR_STOP=1 -f $2" -} - -docker_pgtest() { - local database=${3:-single} - set +e - >&2 echo -e "\033[1m$1\033[0m: $2" - if ! docker exec $1 psql -X -v ECHO=ALL -v ON_ERROR_STOP=1 -h localhost -U postgres -d $database $PGOPTS -f $2 > ${TEST_TMPDIR}/$1.out - then - docker_logs $1 - exit 1 - fi - set -e -} - -docker_pgdiff_all() { - local database=${2:-single} - diff_file1=update_test.restored.diff.${UPDATE_FROM_TAG} - diff_file2=update_test.clean.diff.${UPDATE_FROM_TAG} - docker_pgtest ${CONTAINER_UPDATED} $1 $database - docker_pgtest ${CONTAINER_CLEAN_RESTORE} $1 $database - docker_pgtest ${CONTAINER_CLEAN_RERUN} $1 $database - echo "Diffing updated container vs restored. Updated: ${CONTAINER_UPDATED} restored: ${CONTAINER_CLEAN_RESTORE}" - diff -u ${TEST_TMPDIR}/${CONTAINER_UPDATED}.out ${TEST_TMPDIR}/${CONTAINER_CLEAN_RESTORE}.out | tee ${diff_file1} - if [ ! -s ${diff_file1} ]; then - rm ${diff_file1} - fi - echo "Diffing updated container vs clean run. 
Updated: ${CONTAINER_UPDATED} clean run: ${CONTAINER_CLEAN_RERUN}" - diff -u ${TEST_TMPDIR}/${CONTAINER_UPDATED}.out ${TEST_TMPDIR}/${CONTAINER_CLEAN_RERUN}.out | tee ${diff_file2} - if [ ! -s ${diff_file2} ]; then - rm ${diff_file2} - fi -} - -docker_run() { - docker run --env TIMESCALEDB_TELEMETRY=off --env POSTGRES_HOST_AUTH_METHOD=trust -d --name $1 -v ${BASE_DIR}:/src $2 -c timezone="GMT" -c max_prepared_transactions=100 - wait_for_pg $1 -} - -docker_run_vol() { - docker run --env TIMESCALEDB_TELEMETRY=off --env POSTGRES_HOST_AUTH_METHOD=trust -d --name $1 -v ${BASE_DIR}:/src -v $2 $3 -c timezone="GMT" -c max_prepared_transactions=100 - wait_for_pg $1 -} - -wait_for_pg() { - set +e - for _ in {1..20}; do - sleep 1 - - if docker_exec $1 "pg_isready -U postgres" - then - # this makes the test less flaky, although not - # ideal. Apparently, pg_isready is not always a good - # indication of whether the DB is actually ready to accept - # queries - sleep 1 - set -e - return 0 - fi - docker_logs $1 - - done - exit 1 -} - -# shellcheck disable=SC2001 -# SC2001: See if you can use ${variable//search/replace} instead. -VERSION=$(echo ${UPDATE_FROM_TAG} | sed 's/\([0-9]\{0,\}\.[0-9]\{0,\}\.[0-9]\{0,\}\).*/\1/g') -echo "Testing from version ${VERSION} (test version ${TEST_VERSION})" -echo "Using temporary directory ${TEST_TMPDIR}" - -remove_containers || true - -IMAGE_NAME=${UPDATE_TO_IMAGE} TAG_NAME=${UPDATE_TO_TAG} PG_VERSION=${PG_VERSION} bash ${SCRIPT_DIR}/docker-build.sh - -set -x - -echo "Launching containers" -docker_run ${CONTAINER_ORIG} ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG} -docker_run ${CONTAINER_CLEAN_RESTORE} ${UPDATE_TO_IMAGE}:${UPDATE_TO_TAG} -docker_run ${CONTAINER_CLEAN_RERUN} ${UPDATE_TO_IMAGE}:${UPDATE_TO_TAG} - -# Create roles for test. Roles must be created outside of regular -# setup scripts; they must be added separately to each instance since -# roles are not dumped by pg_dump. 
-docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.roles.sql "postgres" -docker_pgscript ${CONTAINER_CLEAN_RESTORE} /src/test/sql/updates/setup.roles.sql "postgres" -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.roles.sql "postgres" - -CLEAN_VOLUME=$(docker inspect ${CONTAINER_CLEAN_RESTORE} --format='{{range .Mounts }}{{.Name}}{{end}}') -UPDATE_VOLUME=$(docker inspect ${CONTAINER_ORIG} --format='{{range .Mounts }}{{.Name}}{{end}}') - -echo "Executing setup script on container running ${UPDATE_FROM_IMAGE}:${UPDATE_FROM_TAG}" -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.databases.sql "postgres" -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/pre.testing.sql -docker_pgscript ${CONTAINER_ORIG} /src/test/sql/updates/setup.${TEST_VERSION}.sql -docker_pgcmd ${CONTAINER_ORIG} "CHECKPOINT;" - -# Remove container but keep volume -docker rm -f ${CONTAINER_ORIG} - -echo "Running update container" -docker_run_vol ${CONTAINER_UPDATED} ${UPDATE_VOLUME}:/var/lib/postgresql/data ${UPDATE_TO_IMAGE}:${UPDATE_TO_TAG} - -echo "Executing ALTER EXTENSION timescaledb UPDATE ($UPDATE_FROM_TAG -> $UPDATE_TO_TAG)" -docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE" "single" -# Need to update also postgres DB since add_data_node may connect to -# it and it will be borked if we don't upgrade to an extension binary -# which is available in the image. 
-docker_pgcmd ${CONTAINER_UPDATED} "ALTER EXTENSION timescaledb UPDATE" "postgres" - -# Check that there is nothing wrong before taking a backup -echo "Checking that there are no missing dimension slices" -docker_pgscript ${CONTAINER_UPDATED} /src/test/sql/updates/setup.check.sql - -echo "Executing setup script on clean" -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.databases.sql "postgres" -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/pre.testing.sql -docker_pgscript ${CONTAINER_CLEAN_RERUN} /src/test/sql/updates/setup.${TEST_VERSION}.sql - -docker_exec ${CONTAINER_UPDATED} "pg_dump -h localhost -U postgres -Fc single > /tmp/single.dump" -docker cp ${CONTAINER_UPDATED}:/tmp/single.dump ${TEST_TMPDIR}/single.dump - -echo "Restoring database on clean version" -docker cp ${TEST_TMPDIR}/single.dump ${CONTAINER_CLEAN_RESTORE}:/tmp/single.dump - -# Restore single -docker_exec ${CONTAINER_CLEAN_RESTORE} "createdb -h localhost -U postgres single" -docker_pgcmd ${CONTAINER_CLEAN_RESTORE} "ALTER DATABASE single SET timescaledb.restoring='on'" -docker_exec ${CONTAINER_CLEAN_RESTORE} "pg_restore -h localhost -U postgres -d single /tmp/single.dump" -docker_pgcmd ${CONTAINER_CLEAN_RESTORE} "ALTER DATABASE single RESET timescaledb.restoring" - -echo "Comparing upgraded ($UPDATE_FROM_TAG -> $UPDATE_TO_TAG) with clean install ($UPDATE_TO_TAG)" -docker_pgdiff_all /src/test/sql/updates/post.${TEST_VERSION}.sql "single" diff --git a/scripts/test_update_from_version.sh b/scripts/test_update_from_version.sh new file mode 100755 index 00000000000..566d355b060 --- /dev/null +++ b/scripts/test_update_from_version.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash + +# During the update test the following databases will be created: +# - baseline: fresh installation of $TO_VERSION +# - updated: install $FROM_VERSION, update to $TO_VERSION +# - restored: restore from updated dump +# - repair: install $FROM_VERSION, update to $TO_VERSION and run integrity 
tests + +set -e +set -u + +FROM_VERSION=${FROM_VERSION:-$(grep '^downgrade_to_version ' version.config | awk '{ print $3 }')} +TO_VERSION=${TO_VERSION:-$(grep '^version ' version.config | awk '{ print $3 }')} + +TEST_REPAIR=${TEST_REPAIR:-false} +TEST_VERSION=${TEST_VERSION:-v8} + +OUTPUT_DIR=${OUTPUT_DIR:-update_test/${FROM_VERSION}_to_${TO_VERSION}} +PGDATA="${OUTPUT_DIR}/data" +# Get an unused port to allow for parallel execution +PGHOST=localhost +PGPORT=${PGPORT:-$(python -c 'import socket; s=socket.socket(); s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ; s.bind(("", 0)); print(s.getsockname()[1]); s.close()')} +PGDATABASE=postgres + +export PGHOST PGPORT PGDATABASE PGDATA + +run_sql() { + local db=${2:-$PGDATABASE} + psql -X -q -d "${db}" -v ON_ERROR_STOP=1 -v VERBOSITY=verbose -v WITH_ROLES=true -v WITH_SUPERUSER=true -v WITH_CHUNK=true -c "${1}" +} + +run_sql_file() { + local db=${2:-$PGDATABASE} + psql -X -d "${db}" -v ON_ERROR_STOP=1 -v VERBOSITY=verbose -v WITH_ROLES=true -v WITH_SUPERUSER=true -v WITH_CHUNK=true -f "${1}" +} + +check_version() { + psql -X -c "DO \$\$BEGIN PERFORM from pg_available_extension_versions WHERE name='timescaledb' AND version='$1'; IF NOT FOUND THEN RAISE 'Version $1 not available'; END IF; END\$\$;" > /dev/null +} + +trap cleanup EXIT + +cleanup() { + # Save status here so that we can return the status of the last + # command in the script and not the last command of the cleanup + # function + local status="$?" 
+ set +e # do not exit immediately on failure in cleanup handler + pg_ctl stop > /dev/null + rm -rf "${PGDATA}" + exit ${status} +} + +mkdir -p "${OUTPUT_DIR}" +rm -rf "${OUTPUT_DIR}/data" +mkdir -p "${OUTPUT_DIR}/data" + +UNIX_SOCKET_DIR=$(readlink -f "${OUTPUT_DIR}") + +initdb > "${OUTPUT_DIR}/initdb.log" 2>&1 +pg_ctl -l "${OUTPUT_DIR}/postgres.log" start -o "-c unix_socket_directories='${UNIX_SOCKET_DIR}' -c timezone=GMT -c client_min_messages=warning -c port=${PGPORT} -c max_prepared_transactions=100 -c shared_preload_libraries=timescaledb -c timescaledb.telemetry_level=off -c max_worker_processes=0" +pg_isready -t 30 > /dev/null + +echo -e "\nUpdate test for ${FROM_VERSION} -> ${TO_VERSION}\n" + +# caller should ensure that the versions are available +check_version "${FROM_VERSION}" +check_version "${TO_VERSION}" + +run_sql_file test/sql/updates/setup.roles.sql > /dev/null + +echo "Creating baseline database" +{ + run_sql "CREATE DATABASE baseline;" + PGDATABASE=baseline + run_sql "CREATE EXTENSION timescaledb VERSION \"${TO_VERSION}\";" + run_sql_file test/sql/updates/pre.testing.sql + run_sql_file test/sql/updates/setup.${TEST_VERSION}.sql + run_sql "CHECKPOINT;" + run_sql_file test/sql/updates/setup.check.sql +} > "${OUTPUT_DIR}/baseline.log" 2>&1 + +echo "Creating updated database" +{ + run_sql "CREATE DATABASE updated;" > "${OUTPUT_DIR}/updated.log" # FIXME(review): redundant leftover redirect — the enclosing block already redirects to this file, and this '>' truncates the log mid-block; same for the '>>' on the CHECKPOINT line below + PGDATABASE=updated + run_sql "CREATE EXTENSION timescaledb VERSION \"${FROM_VERSION}\";" + run_sql_file test/sql/updates/pre.testing.sql + run_sql_file test/sql/updates/setup.${TEST_VERSION}.sql + run_sql "CHECKPOINT;" >> "${OUTPUT_DIR}/updated.log" + run_sql "ALTER EXTENSION timescaledb UPDATE TO \"${TO_VERSION}\";" + run_sql_file test/sql/updates/setup.check.sql +} > "${OUTPUT_DIR}/updated.log" 2>&1 + +echo "Creating restored database" +{ + run_sql "CREATE DATABASE restored;" + PGDATABASE=restored + run_sql "CREATE EXTENSION timescaledb VERSION \"${TO_VERSION}\";" + run_sql "ALTER DATABASE restored SET
timescaledb.restoring='on';" + pg_dump -Fc -d updated > "${OUTPUT_DIR}/updated.dump" + pg_restore -d restored "${OUTPUT_DIR}/updated.dump" + run_sql "ALTER DATABASE restored RESET timescaledb.restoring;" +} > "${OUTPUT_DIR}/restored.log" 2>&1 + +run_sql_file test/sql/updates/post.${TEST_VERSION}.sql baseline > "${OUTPUT_DIR}/post.baseline.log" +run_sql_file test/sql/updates/post.${TEST_VERSION}.sql updated > "${OUTPUT_DIR}/post.updated.log" +run_sql_file test/sql/updates/post.${TEST_VERSION}.sql restored > "${OUTPUT_DIR}/post.restored.log" + +if [ "${TEST_REPAIR}" = "true" ]; then + echo "Creating repair database" + { + run_sql "CREATE DATABASE repair;" + PGDATABASE=repair + run_sql "CREATE EXTENSION timescaledb VERSION \"${FROM_VERSION}\";" + run_sql_file test/sql/updates/setup.repair.sql baseline # FIXME(review): explicit 'baseline' db argument overrides PGDATABASE=repair in run_sql_file — should this and the two post.* calls below target the 'repair' database instead? + run_sql "ALTER EXTENSION timescaledb UPDATE TO \"${TO_VERSION}\";" + run_sql_file test/sql/updates/post.repair.sql baseline + run_sql_file test/sql/updates/post.integrity_test.sql baseline + } > "${OUTPUT_DIR}/repair.log" 2>&1 +fi + +diff -u "${OUTPUT_DIR}/post.baseline.log" "${OUTPUT_DIR}/post.updated.log" | tee "${OUTPUT_DIR}/baseline_vs_updated.diff" +if [ ! -s "${OUTPUT_DIR}/baseline_vs_updated.diff" ]; then + rm "${OUTPUT_DIR}/baseline_vs_updated.diff" +fi +diff -u "${OUTPUT_DIR}/post.baseline.log" "${OUTPUT_DIR}/post.restored.log" | tee "${OUTPUT_DIR}/baseline_vs_restored.diff" +if [ !
-s "${OUTPUT_DIR}/baseline_vs_restored.diff" ]; then + rm "${OUTPUT_DIR}/baseline_vs_restored.diff" +fi + +if [ -f "${OUTPUT_DIR}/baseline_vs_updated.diff" ] || [ -f "${OUTPUT_DIR}/baseline_vs_restored.diff" ]; then + echo "Update test for ${FROM_VERSION} -> ${TO_VERSION} failed" + exit 1 +fi + + diff --git a/scripts/test_updates.sh b/scripts/test_updates.sh index f92dbabcd26..617e9734888 100755 --- a/scripts/test_updates.sh +++ b/scripts/test_updates.sh @@ -1,106 +1,128 @@ #!/bin/bash -set -o pipefail -set +e # Should not exit immediately on failure - -SCRIPT_DIR=$(dirname $0) -TEST_TMPDIR=${TEST_TMPDIR:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_update_test' || mkdir -p /tmp/$RANDOM )} -BASE_DIR=${PWD}/${SCRIPT_DIR}/.. -TAGS=${TAGS:-} -TEST_VERSION=${TEST_VERSION:-} -GIT_ID=$(git -C ${BASE_DIR} describe --dirty --always | sed -e "s|/|_|g") -UPDATE_TO_IMAGE=${UPDATE_TO_IMAGE:-update_test} -UPDATE_TO_TAG=${UPDATE_TO_TAG:-${GIT_ID}} -PG_VERSION=${PG_VERSION:-14.3} - -# This will propagate to the test_update_from_tags.sh script -export TEST_REPAIR - -FAILED_TEST= -KEEP_TEMP_DIRS=false -TEST_UPDATE_FROM_TAGS_EXTRA_ARGS= -TEST_REPAIR=false -FAIL_COUNT=0 +set -eu -# Declare a hash table to keep test names keyed by pid -declare -A tests - -while getopts "cd" opt; -do - case $opt in - c) - echo "Forcing cleanup of build image" - docker rmi -f ${UPDATE_TO_IMAGE}:${UPDATE_TO_TAG} - ;; - d) - echo "Keeping temporary directory ${TEST_TMPDIR}" - KEEP_TEMP_DIRS=true - TEST_UPDATE_FROM_TAGS_EXTRA_ARGS="-d" - ;; - *) - echo "Unknown flag '$opt'" - exit 1 - ;; - esac -done +SCRIPT_DIR=$(readlink -f "$(dirname $0)") +PG_MAJOR_VERSION=$(pg_config --version | awk '{print $2}' | awk -F. '{print $1}') -kill_all_tests() { - local exit_code="$?" 
- set +e # do not exit immediately on failure - echo "Killing all tests" - kill "${!tests[@]}" 2>/dev/null - return $exit_code -} +PG_EXTENSION_DIR=$(pg_config --sharedir)/extension +if [ "${CI:-false}" == true ]; then + GIT_REF=${GIT_REF:-$(git rev-parse HEAD)} +else + GIT_REF=$(git branch --show-current) +fi -trap kill_all_tests INT HUP +BUILD_DIR="build_update_pg${PG_MAJOR_VERSION}" + +VERSIONS="" +FAILED_VERSIONS="" + +ALL_VERSIONS=$(git tag --sort=taggerdate | grep -P '^[2]\.[0-9]+\.[0-9]+$') +MAX_VERSION=$(grep '^downgrade_to_version ' version.config | awk '{ print $3 }') + +# major version is always 2 atm +max_minor_version=$(echo "${MAX_VERSION}" | awk -F. '{print $2}') +max_patch_version=$(echo "${MAX_VERSION}" | awk -F. '{print $3}') + +# Filter versions depending on the current postgres version +# Minimum version for valid update paths are as follows: +# PG 13 v7 2.1+ +# PG 14 v8 2.5+ +# PG 15 v8 2.9+ +# PG 16 v8 2.13+ +for version in ${ALL_VERSIONS}; do + minor_version=$(echo "${version}" | awk -F. '{print $2}') + patch_version=$(echo "${version}" | awk -F. '{print $3}') + + # skip versions that are newer than the max version + # We might have a tag for a newer version defined already but the post release + # adjustment have not been merged yet. So we want to skip those versions. 
+ if [ "${minor_version}" -gt "${max_minor_version}" ]; then + continue + elif [ "${minor_version}" -eq "${max_minor_version}" ] && [ "${patch_version}" -gt "${max_patch_version}" ]; then + continue + fi + + if [ "${minor_version}" -eq 0 ]; then + # not part of any valid update path + continue + elif [ "${minor_version}" -le 4 ]; then + continue # FIXME(review): this unconditional continue makes the v7 branch below unreachable dead code, yet the run loop later still sets TEST_VERSION=v7 for these versions — confirm <= 2.4 update tests are intentionally skipped + # on <= 2.4 we need to run v7 version of the update test + if [ "${PG_MAJOR_VERSION}" -le 13 ]; then + VERSIONS="${VERSIONS} ${version}" + fi + elif [ "${minor_version}" -le 8 ]; then + if [ "${PG_MAJOR_VERSION}" -le 14 ]; then + VERSIONS="${VERSIONS} ${version}" + fi + elif [ "${minor_version}" -le 12 ]; then + if [ "${PG_MAJOR_VERSION}" -le 15 ]; then + VERSIONS="${VERSIONS} ${version}" + fi + else + VERSIONS="${VERSIONS} ${version}" + fi +done -if [ -z "${TEST_VERSION}" ]; then - echo "No TEST_VERSION specified" - exit 1 -fi +FAIL_COUNT=0 -if [ -z "${TAGS}" ]; then - echo "No TAGS specified" - exit 1 +if [ ! -d "${BUILD_DIR}" ]; then + echo "Initializing build directory" + BUILD_DIR="${BUILD_DIR}" ./bootstrap -DCMAKE_BUILD_TYPE=Release -DWARNINGS_AS_ERRORS=OFF -DASSERTIONS=ON -DLINTER=OFF -DGENERATE_DOWNGRADE_SCRIPT=ON -DREGRESS_CHECKS=OFF -DTAP_CHECKS=OFF fi -# Build the docker image with current source here so that the parallel -# tests don't all compete in trying to build it first -IMAGE_NAME=${UPDATE_TO_IMAGE} TAG_NAME=${UPDATE_TO_TAG} PG_VERSION=${PG_VERSION} bash ${SCRIPT_DIR}/docker-build.sh +for version in ${VERSIONS}; do + if [ !
-f "${PG_EXTENSION_DIR}/timescaledb--${version}.sql" ]; then + echo "Building ${version}" + git checkout ${version} + make -C "${BUILD_DIR}" -j4 > /dev/null + sudo make -C "${BUILD_DIR}" install > /dev/null + git checkout ${GIT_REF} + fi +done -# Run update tests in parallel -for tag in ${TAGS}; -do - UPDATE_FROM_TAG=${tag} TEST_VERSION=${TEST_VERSION} "$(dirname $0)/test_update_from_tag.sh" ${TEST_UPDATE_FROM_TAGS_EXTRA_ARGS} > ${TEST_TMPDIR}/${tag}.log 2>&1 & +# We want to use the latest loader for all the tests so we build it last +git checkout ${GIT_REF} +make -C "${BUILD_DIR}" -j4 +sudo make -C "${BUILD_DIR}" install - tests[$!]=${tag} - echo "Launched test ${tag} with pid $!" -done +set +e -# Need to wait on each pid in a loop to return the exit status of each - -# Since we are iterating a hash table, the tests are not going to be -# in order started. But it doesn't matter. -for pid in "${!tests[@]}" -do - echo "Waiting for test pid $pid" - wait $pid - exit_code=$? - echo "Test ${tests[$pid]} (pid $pid) exited with code $exit_code" - - if [ $exit_code -ne 0 ]; then - FAIL_COUNT=$((FAIL_COUNT + 1)) - FAILED_TEST=${tests[$pid]} - if [ -f ${TEST_TMPDIR}/${FAILED_TEST}.log ]; then - echo "###### Failed test log below #####" - cat ${TEST_TMPDIR}/${FAILED_TEST}.log - fi +if [ -n "${VERSIONS}" ]; then + for version in ${VERSIONS}; do + ts_minor_version=$(echo "${version}" | awk -F. '{print $2}') + + if [ "${ts_minor_version}" -le 4 ]; then + TEST_VERSION=v7 + else + TEST_VERSION=v8 fi -done -if [ "$KEEP_TEMP_DIRS" = "false" ]; then - echo "Cleaning up temporary directory" - rm -rf ${TEST_TMPDIR} + if [ "${ts_minor_version}" -ge 10 ]; then + TEST_REPAIR=true + else + TEST_REPAIR=false + fi + + export TEST_VERSION TEST_REPAIR + + FROM_VERSION=${version} "${SCRIPT_DIR}/test_update_from_version.sh" + return_code=$? 
+ if [ $return_code -ne 0 ]; then + FAIL_COUNT=$((FAIL_COUNT + 1)) + FAILED_VERSIONS="${FAILED_VERSIONS} ${version}" + fi + done +fi + +echo -e "\nUpdate test finished for ${VERSIONS}\n" + +if [ $FAIL_COUNT -gt 0 ]; then + echo -e "Failed versions: ${FAILED_VERSIONS}\n" +else + echo -e "All tests succeeded.\n" fi exit $FAIL_COUNT + diff --git a/scripts/test_updates_pg13.sh b/scripts/test_updates_pg13.sh deleted file mode 100755 index 5eb10f9c732..00000000000 --- a/scripts/test_updates_pg13.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -set -e - -PG_VERSION=${PG_VERSION:-13.14} -export PG_VERSION - -SCRIPT_DIR=$(dirname $0) - -# shellcheck source=scripts/test_functions.inc -source ${SCRIPT_DIR}/test_functions.inc - -run_tests "$@" -v7 \ - 2.1.0-pg13 2.1.1-pg13 2.2.0-pg13 2.2.1-pg13 2.3.0-pg13 2.3.1-pg13 \ - 2.4.0-pg13 2.4.1-pg13 2.4.2-pg13 - -run_tests "$@" -v8 \ - 2.5.0-pg13 2.5.1-pg13 2.5.2-pg13 2.6.0-pg13 2.6.1-pg13 2.7.0-pg13 2.7.1-pg13 2.7.2-pg13 \ - 2.8.0-pg13 2.8.1-pg13 2.9.0-pg13 2.9.1-pg13 2.9.2-pg13 2.9.3-pg13 - -run_tests "$@" -v8 \ - 2.10.0-pg13 2.10.1-pg13 2.10.2-pg13 2.10.3-pg13 2.11.0-pg13 2.11.1-pg13 2.11.2-pg13 \ - 2.12.0-pg13 2.12.1-pg13 2.12.2-pg13 2.13.0-pg13 2.13.1-pg13 2.14.0-pg13 2.14.1-pg13 \ - 2.14.2-pg13 - -# Run repair tests for >= 2.10.x versions due to PR #5441 -run_tests "$@" -r -v8 \ - 2.10.0-pg13 2.10.1-pg13 2.10.2-pg13 2.10.3-pg13 2.11.0-pg13 2.11.1-pg13 2.11.2-pg13 \ - 2.12.0-pg13 2.12.1-pg13 2.12.2-pg13 2.13.0-pg13 2.13.1-pg13 2.14.0-pg13 2.14.1-pg13 \ - 2.14.2-pg13 - diff --git a/scripts/test_updates_pg14.sh b/scripts/test_updates_pg14.sh deleted file mode 100755 index dc9e8a849dc..00000000000 --- a/scripts/test_updates_pg14.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -set -e - -PG_VERSION=${PG_VERSION:-14.11} -export PG_VERSION - -SCRIPT_DIR=$(dirname $0) - -# shellcheck source=scripts/test_functions.inc -source ${SCRIPT_DIR}/test_functions.inc - -run_tests "$@" -v7 \ - 2.5.0-pg14 2.5.1-pg14 - -run_tests 
"$@" -v8 \ - 2.5.0-pg14 2.5.1-pg14 2.5.2-pg14 2.6.0-pg14 2.6.1-pg14 2.7.0-pg14 2.7.1-pg14 2.7.2-pg14 \ - 2.8.0-pg14 2.8.1-pg14 2.9.0-pg14 2.9.1-pg14 2.9.2-pg14 2.9.3-pg14 - -run_tests "$@" -v8 \ - 2.10.0-pg14 2.10.1-pg14 2.10.2-pg14 2.10.3-pg14 2.11.0-pg14 2.11.1-pg14 2.11.2-pg14 \ - 2.12.0-pg14 2.12.1-pg14 2.12.2-pg14 2.13.0-pg14 2.13.1-pg14 2.14.0-pg14 2.14.1-pg14 \ - 2.14.2-pg14 - -# Run repair tests for >=2.10.x versions due to PR #5441 -run_tests "$@" -r -v8 \ - 2.10.0-pg14 2.10.1-pg14 2.10.2-pg14 2.10.3-pg14 2.11.0-pg14 2.11.1-pg14 2.11.2-pg14 \ - 2.12.0-pg14 2.12.1-pg14 2.12.2-pg14 2.13.0-pg14 2.13.1-pg14 2.14.0-pg14 2.14.1-pg14 \ - 2.14.2-pg14 - diff --git a/scripts/test_updates_pg15.sh b/scripts/test_updates_pg15.sh deleted file mode 100755 index 894a90cc9e9..00000000000 --- a/scripts/test_updates_pg15.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -set -e - -PG_VERSION=${PG_VERSION:-15.6} -export PG_VERSION - -SCRIPT_DIR=$(dirname $0) - -# shellcheck source=scripts/test_functions.inc -source ${SCRIPT_DIR}/test_functions.inc - -run_tests "$@" -v8 \ - 2.9.0-pg15 2.9.1-pg15 2.9.2-pg15 2.9.3-pg15 - -run_tests "$@" -v8 \ - 2.10.0-pg15 2.10.1-pg15 2.10.2-pg15 2.10.3-pg15 2.11.0-pg15 2.11.1-pg15 \ - 2.11.2-pg15 2.12.0-pg15 2.12.1-pg15 2.12.2-pg15 2.13.0-pg15 2.13.1-pg15 \ - 2.14.0-pg15 2.14.1-pg15 2.14.2-pg15 - -# Run repair tests for >=2.10.x versions due to PR #5441 -run_tests "$@" -r -v8 \ - 2.10.0-pg15 2.10.1-pg15 2.10.2-pg15 2.10.3-pg15 2.11.0-pg15 2.11.1-pg15 \ - 2.11.2-pg15 2.12.0-pg15 2.12.1-pg15 2.12.2-pg15 2.13.0-pg15 2.13.1-pg15 \ - 2.14.0-pg15 2.14.1-pg15 2.14.2-pg15 - diff --git a/scripts/test_updates_pg16.sh b/scripts/test_updates_pg16.sh deleted file mode 100755 index 4c537fc3e18..00000000000 --- a/scripts/test_updates_pg16.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -e - -PG_VERSION=${PG_VERSION:-16.2} -export PG_VERSION - -SCRIPT_DIR=$(dirname $0) - -# shellcheck source=scripts/test_functions.inc -source 
${SCRIPT_DIR}/test_functions.inc - -run_tests "$@" -v8 \ - 2.13.0-pg16 2.13.1-pg16 2.14.0-pg16 2.14.1-pg16 2.14.2-pg16 - -# Run repair tests for >=2.10.x versions due to PR #5441 -run_tests "$@" -r -v8 \ - 2.13.0-pg16 2.13.1-pg16 2.14.0-pg16 2.14.1-pg16 2.14.2-pg16 - diff --git a/test/sql/updates/README.md b/test/sql/updates/README.md deleted file mode 100644 index ed21145b86f..00000000000 --- a/test/sql/updates/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Testing using multiple versions - -This tests creates multiple instances with different upgrade -scenarios: - -# CONTAINER_UPDATED: -- install previous version -- run test setup script -- update container -- ALTER EXTENSION UPDATE - -# CONTAINER_CLEAN_RERUN: -- install main -- run test setup script - -# CONTAINER_CLEAN_RESTORE: -- dump CONTAINER_UPDATED with pg_dump -- restore in new container - -After those steps the test post script is run on all instances and -the output diffed, throwing an error if there is a difference between -any of the instances. - diff --git a/test/sql/updates/post.repair.sql b/test/sql/updates/post.repair.sql index b96981fc2b6..a970970eb96 100644 --- a/test/sql/updates/post.repair.sql +++ b/test/sql/updates/post.repair.sql @@ -2,29 +2,9 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. -SELECT - split_part(extversion, '.', 1)::int * 100000 + - split_part(extversion, '.', 2)::int * 100 AS extversion_num -FROM - pg_extension WHERE extname = 'timescaledb' \gset - -SELECT - :extversion_num < 201000 AS test_repair_dimension, - :extversion_num >= 201000 AND :'TEST_VERSION' >= 'v8' AS test_repair_cagg_joins \gset - -\if :test_repair_dimension - -- Re-add the dropped foreign key constraint that was dropped for - -- repair testing. 
- ALTER TABLE _timescaledb_catalog.chunk_constraint - ADD CONSTRAINT chunk_constraint_dimension_slice_id_fkey - FOREIGN KEY (dimension_slice_id) REFERENCES _timescaledb_catalog.dimension_slice (id); -\endif - -\if :test_repair_cagg_joins --Check if the repaired cagg with joins work alright now - \ir post.repair.cagg_joins.sql - \ir post.repair.hierarchical_cagg.sql -\endif +\ir post.repair.cagg_joins.sql +\ir post.repair.hierarchical_cagg.sql \z repair_test_int \z repair_test_extra diff --git a/test/sql/updates/setup.repair.sql b/test/sql/updates/setup.repair.sql index b529f5964de..0583b364ff2 100644 --- a/test/sql/updates/setup.repair.sql +++ b/test/sql/updates/setup.repair.sql @@ -7,16 +7,6 @@ -- the dimension slice table. The repair script should then repair all -- of them and there should be no dimension slices missing. -SELECT - split_part(extversion, '.', 1)::int * 100000 + - split_part(extversion, '.', 2)::int * 100 AS extversion_num -FROM - pg_extension WHERE extname = 'timescaledb' \gset - -SELECT - :extversion_num < 201000 AS test_repair_dimension, - :extversion_num >= 201000 AS has_cagg_joins \gset - CREATE USER wizard; CREATE USER "Random L User"; @@ -80,132 +70,6 @@ GRANT INSERT ON repair_test_extra TO "Random L User"; -- pg_authid table. 
DELETE FROM pg_authid WHERE rolname IN ('wizard', 'Random L User'); -\if :test_repair_dimension - -CREATE VIEW slices AS ( - SELECT ch.hypertable_id, - ( - SELECT format('%I.%I', schema_name, table_name)::regclass - FROM _timescaledb_catalog.hypertable ht - WHERE ht.id = ch.hypertable_id - ) AS hypertable, - chunk_id, - di.id AS dimension_id, - dimension_slice_id, - constraint_name, - attname AS column_name, - column_type, - pg_get_expr(conbin, conrelid) AS constraint_expr - FROM _timescaledb_catalog.chunk_constraint cc - JOIN _timescaledb_catalog.chunk ch ON cc.chunk_id = ch.id - JOIN pg_constraint ON conname = constraint_name - JOIN pg_namespace ns ON connamespace = ns.oid AND ns.nspname = ch.schema_name - JOIN pg_attribute ON attnum = conkey[1] AND attrelid = conrelid - JOIN _timescaledb_catalog.dimension di - ON di.hypertable_id = ch.hypertable_id AND attname = di.column_name - ); - --- Break the first time dimension on each table. These are different --- depending on the time type for the table and we need to check all --- versions. 
-DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_int'::regclass AND column_name = 'time' - ORDER BY dimension_slice_id LIMIT 1 -); - -DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_timestamp'::regclass AND column_name = 'time' - ORDER BY dimension_slice_id LIMIT 1 -); - -DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_timestamptz'::regclass AND column_name = 'time' - ORDER BY dimension_slice_id LIMIT 1 -); - -DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_date'::regclass AND column_name = 'time' - ORDER BY dimension_slice_id LIMIT 1 -); - --- Delete all dimension slices for one table to break it seriously. It --- should still be repaired. -DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_extra'::regclass -); - --- Break the partition constraints on some of the tables. The --- partition constraints look the same in all tables so we create a --- mix of tables with no missing partition constraint slices, just one --- missing partition constraint dimension slice, and several missing --- partition constraint dimension slices. 
-DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_timestamp'::regclass AND column_name = 'tag' - ORDER BY dimension_slice_id LIMIT 1 -); - -DELETE FROM _timescaledb_catalog.dimension_slice WHERE id IN ( - SELECT dimension_slice_id FROM slices - WHERE hypertable = 'repair_test_date'::regclass AND column_name = 'tag' - ORDER BY dimension_slice_id -); - -\echo **** Expected repairs **** -WITH unparsed_slices AS ( - SELECT dimension_id, - dimension_slice_id, - hypertable, - constraint_name, - column_type, - column_name, - (SELECT SUBSTRING(constraint_expr, $$>=\s*'?([\w\d\s:+-]+)'?$$)) AS range_start, - (SELECT SUBSTRING(constraint_expr, $$<\s*'?([\w\d\s:+-]+)'?$$)) AS range_end - FROM slices -) -SELECT DISTINCT - dimension_slice_id, - dimension_id, - CASE - WHEN column_type = 'timestamptz'::regtype THEN - EXTRACT(EPOCH FROM range_start::timestamptz)::bigint * 1000000 - WHEN column_type = 'timestamp'::regtype THEN - EXTRACT(EPOCH FROM range_start::timestamp)::bigint * 1000000 - WHEN column_type = 'date'::regtype THEN - EXTRACT(EPOCH FROM range_start::date)::bigint * 1000000 - ELSE - CASE - WHEN range_start IS NULL - THEN (-9223372036854775808)::bigint - ELSE range_start::bigint - END - END AS range_start, - CASE - WHEN column_type = 'timestamptz'::regtype THEN - EXTRACT(EPOCH FROM range_end::timestamptz)::bigint * 1000000 - WHEN column_type = 'timestamp'::regtype THEN - EXTRACT(EPOCH FROM range_end::timestamp)::bigint * 1000000 - WHEN column_type = 'date'::regtype THEN - EXTRACT(EPOCH FROM range_end::date)::bigint * 1000000 - ELSE - CASE WHEN range_end IS NULL - THEN 9223372036854775807::bigint - ELSE range_end::bigint - END - END AS range_end - FROM unparsed_slices - WHERE dimension_slice_id NOT IN (SELECT id FROM _timescaledb_catalog.dimension_slice); - -DROP VIEW slices; -\endif - \ir setup.repair.cagg.sql -\if :has_cagg_joins - \ir setup.repair.hierarchical_cagg.sql - \ir 
setup.repair.cagg_joins.sql -\endif +\ir setup.repair.hierarchical_cagg.sql +\ir setup.repair.cagg_joins.sql