diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 01f356c8..9c4d2a6e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,18 +10,23 @@ jobs:
matrix:
config:
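+ # "test" selects the docker compose environment under etc/compose that bin/test.sh runs against the built image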
- image: accumulo
+ test: accumulo
- image: dns
+ test: dns
- image: centos7-oj17
- image: centos7-oj17-openldap-referrals
+ test: openldap
- image: spark3-iceberg
+ test: spark3-iceberg
- image: spark3-delta
+ test: spark3-delta
- image: kerberos
test: kerberos
- image: gpdb-6
test: gpdb-6
- image: hdp2.6-hive-kerberized-2
- image: hive3.1-hive
- imag: hive3.1-hive
+ test: hive3.1-hive
- image: hdp2.6-hive-kerberized
test: hdp2.6-hive
- image: hdp3.1-hive-kerberized
@@ -31,6 +36,7 @@ jobs:
- image: cdh5.15-hive-kerberized
test: cdh5.15-hive
- image: cdh5.15-hive-kerberized-kms
+ # TODO add test https://github.com/trinodb/trino/issues/14543
- image: phoenix5
steps:
- uses: actions/checkout@v3
diff --git a/bin/depend.sh b/bin/depend.sh
index 5b9a82e6..8cfd26c1 100755
--- a/bin/depend.sh
+++ b/bin/depend.sh
@@ -1,11 +1,11 @@
#!/usr/bin/env bash
usage() {
- echo "$0 {-d|-g|-p {tag}|-x} {target image Dockerfile} [known image tags]" >&2
+ echo "$0 {-d|-g|-p {tag}|-x} {target image Dockerfile} [known image tags]" >&2
}
find_parent() {
- awk '
+ awk '
BEGIN {
ec = 1;
}
@@ -28,9 +28,9 @@ find_parent() {
}
contains() {
- needle=$1
- shift
- echo "$@" | grep -q -E "\<$needle\>"
+ needle=$1
+ shift
+ echo "$@" | grep -q -E "\<$needle\>"
}
#
@@ -40,43 +40,43 @@ contains() {
# invoking `docker pull' to fetch the base images.
#
make_friendly_name() {
- echo "$1" | sed 's/:/@/g'
+ echo "$1" | sed 's/:/@/g'
}
untag() {
- echo "${1%:*}"
+ echo "${1%:*}"
}
noop() {
- :
+ :
}
depfiles_own_image() {
- local target_image=$1
- local make_friendly_parent=$(make_friendly_name "$2")
- local untagged_parent=$(untag "$2")
-
- echo "$target_image@latest: $make_friendly_parent"
- echo ".PHONY: $target_image.dependants $untagged_parent.dependants"
- echo "$untagged_parent.dependants: $target_image"
- echo "$untagged_parent.dependants: $target_image.dependants"
+ local target_image=$1
+ local make_friendly_parent=$(make_friendly_name "$2")
+ local untagged_parent=$(untag "$2")
+
+ echo "$target_image@latest: $make_friendly_parent"
+ echo ".PHONY: $target_image.dependants $untagged_parent.dependants"
+ echo "$untagged_parent.dependants: $target_image"
+ echo "$untagged_parent.dependants: $target_image.dependants"
}
depfiles_ext_image() {
- local target_image="$1"
- local make_friendly_parent=$(make_friendly_name "$2")
+ local target_image="$1"
+ local make_friendly_parent=$(make_friendly_name "$2")
- echo "$target_image@latest: $make_friendly_parent"
+ echo "$target_image@latest: $make_friendly_parent"
}
list_ext_image() {
- local make_friendly_parent=$(make_friendly_name "$2")
- echo "$make_friendly_parent"
+ local make_friendly_parent=$(make_friendly_name "$2")
+ echo "$make_friendly_parent"
}
graph_own_image() {
- local untagged_parent=$(untag "$2")
- cat <<-EOF
+ local untagged_parent=$(untag "$2")
+ cat <<-EOF
"$1" [shape=box]
"$untagged_parent" [shape=box]
"$1" -> "$untagged_parent"
@@ -85,7 +85,7 @@ EOF
}
graph_ext_image() {
- cat <<-EOF
+ cat <<-EOF
"$1" [shape=box]
"$2" [shape=house; style=filled; fillcolor="#a0a0a0"]
"$1" -> "$2"
@@ -93,50 +93,50 @@ EOF
}
require_parent_tag() {
- local target_image=$1
- local parent_image=$2
+ local target_image=$1
+ local parent_image=$2
- if ! echo "$parent_image" | grep ":${required_parent_tag}\$"; then
- echo "FROM in Dockerfile for $target_image must specify a parent with the tag '$required_parent_tag'" >&2
- exit 1
- fi
+ if ! echo "$parent_image" | grep ":${required_parent_tag}\$"; then
+ echo "FROM in Dockerfile for $target_image must specify a parent with the tag '$required_parent_tag'" >&2
+ exit 1
+ fi
}
while getopts ":dgp:x" c; do
- case $c in
- d)
- own_image_function=depfiles_own_image
- ext_image_function=depfiles_ext_image
- ;;
- x)
- own_image_function=noop
- ext_image_function=list_ext_image
- ;;
- g)
- own_image_function=graph_own_image
- ext_image_function=graph_ext_image
- ;;
- p)
- own_image_function=require_parent_tag
- required_parent_tag=$OPTARG
- ext_image_function=noop
- ;;
- \?)
- echo "Unrecognized option -$OPTARG" >&2
- exit 1
- ;;
- :)
- echo "Option -$OPTARG requires an argument" >&2
- exit 1
- ;;
- esac
+ case $c in
+ d)
+ own_image_function=depfiles_own_image
+ ext_image_function=depfiles_ext_image
+ ;;
+ x)
+ own_image_function=noop
+ ext_image_function=list_ext_image
+ ;;
+ g)
+ own_image_function=graph_own_image
+ ext_image_function=graph_ext_image
+ ;;
+ p)
+ own_image_function=require_parent_tag
+ required_parent_tag=$OPTARG
+ ext_image_function=noop
+ ;;
+ \?)
+ echo "Unrecognized option -$OPTARG" >&2
+ exit 1
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument" >&2
+ exit 1
+ ;;
+ esac
done
-shift $((OPTIND-1))
+shift $((OPTIND - 1))
if [ -z "$own_image_function" ] || [ $# -lt 2 ]; then
- usage
- exit 1
+ usage
+ exit 1
fi
target_dockerfile=$1
@@ -147,19 +147,19 @@ known_images="$*"
parent_image_tag=$(find_parent "$target_dockerfile")
ec=$?
case $ec in
- 0) ;;
- 1)
- echo "Failed to find a parent docker image in $target_dockerfile" >&2
- exit $ec
- ;;
- 2)
- echo "Found multiple parent docker images in $target_dockerfile" >&2
- exit $ec
- ;;
+ 0) ;;
+ 1)
+ echo "Failed to find a parent docker image in $target_dockerfile" >&2
+ exit $ec
+ ;;
+ 2)
+ echo "Found multiple parent docker images in $target_dockerfile" >&2
+ exit $ec
+ ;;
esac
if contains "$parent_image_tag" "$known_images"; then
- $own_image_function "$target_image" "$parent_image_tag"
+ $own_image_function "$target_image" "$parent_image_tag"
else
- $ext_image_function "$target_image" "$parent_image_tag"
+ $ext_image_function "$target_image" "$parent_image_tag"
fi
diff --git a/bin/flag.sh b/bin/flag.sh
index ca01666a..58c842ea 100755
--- a/bin/flag.sh
+++ b/bin/flag.sh
@@ -1,12 +1,12 @@
#!/bin/sh
usage() {
- echo "$0 {target image}" >&2
+ echo "$0 {target image}" >&2
}
find_args() {
- local target_image=$(dirname "$target_dockerfile")
- awk -v image="$target_image" '
+ local target_image=$(dirname "$target_dockerfile")
+ awk -v image="$target_image" '
BEGIN {
ARG_PATTERN = "^\\s*ARG";
print "DBFLAGS_" image " :=";
@@ -25,8 +25,8 @@ find_args() {
}
if [ $# -lt 1 ]; then
- usage
- exit 1
+ usage
+ exit 1
fi
target_dockerfile=$1
diff --git a/bin/push.sh b/bin/push.sh
index 197b84ab..eca34dca 100755
--- a/bin/push.sh
+++ b/bin/push.sh
@@ -4,8 +4,8 @@ set -xeuo pipefail
while [ "$#" -gt 0 ]; do
while ! docker push "$1"; do
- echo "Failed to push $1, retrying in 30s..."
- sleep 30
+ echo "Failed to push $1, retrying in 30s..."
+ sleep 30
done
shift
done
diff --git a/bin/test.sh b/bin/test.sh
index a9be8774..6a9f806e 100755
--- a/bin/test.sh
+++ b/bin/test.sh
@@ -3,46 +3,46 @@
set -e
function retry() {
- END=$(($(date +%s) + 600))
+ END=$(($(date +%s) + 600))
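+ # Re-run "$@" every 5 seconds until it succeeds or the 10-minute deadline expires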
- while (( $(date +%s) < $END )); do
- set +e
- "$@"
- EXIT_CODE=$?
- set -e
+ while (($(date +%s) < END)); do
+ set +e
+ "$@"
+ EXIT_CODE=$?
+ set -e
- if [[ ${EXIT_CODE} == 0 ]]; then
- break
- fi
- sleep 5
- done
+ if [[ ${EXIT_CODE} == 0 ]]; then
+ break
+ fi
+ sleep 5
+ done
- return ${EXIT_CODE}
+ return ${EXIT_CODE}
}
function environment_compose() {
- docker-compose -f "${DOCKER_CONF_LOCATION}/${ENVIRONMENT}/docker-compose.yml" "$@"
+ docker compose -f "${DOCKER_CONF_LOCATION}/${ENVIRONMENT}/docker-compose.yml" "$@"
}
function check_hadoop() {
- environment_compose exec hadoop-master hive -e 'select 1;' > /dev/null 2>&1
+ environment_compose exec hadoop-master hive -e 'select 1;' >/dev/null 2>&1
}
function run_hadoop_tests() {
- environment_compose exec hadoop-master hive -e 'SELECT 1' &&
- environment_compose exec hadoop-master hive -e 'CREATE TABLE foo (a INT);' &&
- environment_compose exec hadoop-master hive -e 'INSERT INTO foo VALUES (54);' &&
- # SELECT with WHERE to make sure that map-reduce job is scheduled
- environment_compose exec hadoop-master hive -e 'SELECT a FROM foo WHERE a > 0;' &&
- # Test table bucketing
- environment_compose exec hadoop-master hive -e '
+ environment_compose exec hadoop-master hive -e 'SELECT 1' &&
+ environment_compose exec hadoop-master hive -e 'CREATE TABLE foo (a INT);' &&
+ environment_compose exec hadoop-master hive -e 'INSERT INTO foo VALUES (54);' &&
+ # SELECT with WHERE to make sure that a map-reduce job is scheduled
+ environment_compose exec hadoop-master hive -e 'SELECT a FROM foo WHERE a > 0;' &&
+ # Test table bucketing
+ environment_compose exec hadoop-master hive -e '
CREATE TABLE bucketed_table(a INT) CLUSTERED BY(a) INTO 32 BUCKETS;
SET hive.enforce.bucketing = true;
INSERT INTO bucketed_table VALUES (1), (2), (3), (4);
' &&
- test $(environment_compose exec hadoop-master hdfs dfs -ls /user/hive/warehouse/bucketed_table \
- | tee /dev/stderr | grep /bucketed_table/ | wc -l) -ge 4 &&
- true
+ test $(environment_compose exec hadoop-master hdfs dfs -ls /user/hive/warehouse/bucketed_table |
+ tee /dev/stderr | grep /bucketed_table/ | wc -l) -ge 4 &&
+ true
}
function run_hive_transactional_tests() {
@@ -50,22 +50,27 @@ function run_hive_transactional_tests() {
CREATE TABLE transactional_table (x int) STORED AS orc TBLPROPERTIES ('transactional'='true');
INSERT INTO transactional_table VALUES (1), (2), (3), (4);
" &&
- environment_compose exec hadoop-master hive -e 'SELECT x FROM transactional_table WHERE x > 0;' &&
- environment_compose exec hadoop-master hive -e 'DELETE FROM transactional_table WHERE x = 2;' &&
- environment_compose exec hadoop-master hive -e 'UPDATE transactional_table SET x = 14 WHERE x = 4;' &&
- environment_compose exec hadoop-master hive -e 'SELECT x FROM transactional_table WHERE x > 0;' &&
- true
+ environment_compose exec hadoop-master hive -e 'SELECT x FROM transactional_table WHERE x > 0;' &&
+ environment_compose exec hadoop-master hive -e 'DELETE FROM transactional_table WHERE x = 2;' &&
+ environment_compose exec hadoop-master hive -e 'UPDATE transactional_table SET x = 14 WHERE x = 4;' &&
+ environment_compose exec hadoop-master hive -e 'SELECT x FROM transactional_table WHERE x > 0;' &&
+ true
}
function check_gpdb() {
- environment_compose exec gpdb su gpadmin -l -c "pg_isready"
+ environment_compose exec gpdb su gpadmin -l -c "pg_isready"
+}
+
+function check_health() {
+ local service=$1
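+ # "docker compose ps --format json" describes each service; the check passes once our service reports Health "healthy"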
+ test "$(environment_compose ps --format json | jq -er --arg name "$service" '.[] | select(.Service == $name) | .Health')" == "healthy"
}
function run_gpdb_tests() {
environment_compose exec gpdb su gpadmin -l -c "psql -c 'CREATE TABLE foo (a INT) DISTRIBUTED RANDOMLY'" &&
- environment_compose exec gpdb su gpadmin -l -c "psql -c 'INSERT INTO foo VALUES (54)'" &&
- environment_compose exec gpdb su gpadmin -l -c "psql -c 'SELECT a FROM foo'" &&
- true
+ environment_compose exec gpdb su gpadmin -l -c "psql -c 'INSERT INTO foo VALUES (54)'" &&
+ environment_compose exec gpdb su gpadmin -l -c "psql -c 'SELECT a FROM foo'" &&
+ true
}
function run_kerberos_tests() {
@@ -74,52 +79,55 @@ function run_kerberos_tests() {
environment_compose exec kerberos kinit -kt ala.keytab ala@STARBURSTDATA.COM
}
+function check_openldap() {
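+ # wait-for-slapd.sh ships in the openldap image and blocks until slapd accepts connections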
+ environment_compose exec openldap /usr/bin/wait-for-slapd.sh
+}
+
function stop_all_containers() {
- local ENVIRONMENT
- for ENVIRONMENT in $(getAvailableEnvironments)
- do
- stop_docker_compose_containers ${ENVIRONMENT}
- done
+ local ENVIRONMENT
+ for ENVIRONMENT in $(getAvailableEnvironments); do
+ stop_docker_compose_containers ${ENVIRONMENT}
+ done
}
function stop_docker_compose_containers() {
- local ENVIRONMENT=$1
- RUNNING_CONTAINERS=$(environment_compose ps -q)
-
- if [[ ! -z ${RUNNING_CONTAINERS} ]]; then
- # stop containers started with "up", removing their volumes
- # Some containers (SQL Server) fail to stop on Travis after running the tests. We don't have an easy way to
- # reproduce this locally. Since all the tests complete successfully, we ignore this failure.
- environment_compose down -v || true
- fi
+ local ENVIRONMENT=$1
+ RUNNING_CONTAINERS=$(environment_compose ps -q)
+
+ if [[ -n ${RUNNING_CONTAINERS} ]]; then
+ # stop containers started with "up", removing their volumes
+ # Some containers (SQL Server) fail to stop on Travis after running the tests. We don't have an easy way to
+ # reproduce this locally. Since all the tests complete successfully, we ignore this failure.
+ environment_compose down -v || true
+ fi
- echo "Docker compose containers stopped: [$ENVIRONMENT]"
+ echo "Docker compose containers stopped: [$ENVIRONMENT]"
}
function cleanup() {
- stop_docker_compose_containers ${ENVIRONMENT}
+ stop_docker_compose_containers ${ENVIRONMENT}
- # Ensure that the logs processes are terminated.
- # In most cases after the docker containers are stopped, logs processes must be terminated.
- if [[ ! -z ${LOGS_PID} ]]; then
- kill ${LOGS_PID} 2>/dev/null || true
- fi
+ # Ensure that the logs processes are terminated.
+ # In most cases after the docker containers are stopped, logs processes must be terminated.
+ if [[ -n ${LOGS_PID} ]]; then
+ kill ${LOGS_PID} 2>/dev/null || true
+ fi
- # docker logs processes are being terminated as soon as docker container are stopped
- # wait for docker logs termination
- wait 2>/dev/null || true
+ # docker logs processes are terminated as soon as the docker containers are stopped
+ # wait for docker logs termination
+ wait 2>/dev/null || true
}
function terminate() {
- trap - INT TERM EXIT
- set +e
- cleanup
- exit 130
+ trap - INT TERM EXIT
+ set +e
+ cleanup
+ exit 130
}
function getAvailableEnvironments() {
- for i in $(ls -d $DOCKER_CONF_LOCATION/*/); do echo ${i%%/}; done \
- | grep -v files | grep -v common | xargs -n1 basename
+ for i in $(ls -d $DOCKER_CONF_LOCATION/*/); do echo ${i%%/}; done |
+ grep -v files | grep -v common | xargs -n1 basename
}
SCRIPT_DIR=${BASH_SOURCE%/*}
@@ -130,14 +138,14 @@ ENVIRONMENT=$1
# Get the list of valid environments
if [[ ! -f "$DOCKER_CONF_LOCATION/$ENVIRONMENT/docker-compose.yml" ]]; then
- echo "Usage: run_on_docker.sh <$(getAvailableEnvironments | tr '\n' '|')>"
- exit 1
+ echo "Usage: run_on_docker.sh <$(getAvailableEnvironments | tr '\n' '|')>"
+ exit 1
fi
shift 1
# check docker and docker compose installation
-docker-compose version
+docker compose version
docker version
stop_all_containers
@@ -152,7 +160,11 @@ environment_compose logs --no-color -f &
LOGS_PID=$!
-if [[ ${ENVIRONMENT} == "kerberos" ]]; then
+if [[ ${ENVIRONMENT} == *"accumulo"* ]]; then
+ retry check_health accumulo
+elif [[ ${ENVIRONMENT} == *"dns"* ]]; then
+ retry check_health dns
+elif [[ ${ENVIRONMENT} == "kerberos" ]]; then
run_kerberos_tests
elif [[ ${ENVIRONMENT} == *"gpdb"* ]]; then
# wait until gpdb process is started
@@ -163,7 +175,7 @@ elif [[ ${ENVIRONMENT} == *"gpdb"* ]]; then
set +e
sleep 10
run_gpdb_tests
-else
+elif [[ ${ENVIRONMENT} == *"hive"* ]]; then
# wait until hadoop processes are started
retry check_hadoop
@@ -173,8 +185,15 @@ else
sleep 10
run_hadoop_tests
if [[ ${ENVIRONMENT} == *"3.1-hive" ]]; then
- run_hive_transactional_tests
+ run_hive_transactional_tests
fi
+elif [[ ${ENVIRONMENT} == *"openldap"* ]]; then
+ retry check_openldap
+elif [[ ${ENVIRONMENT} == *"spark"* ]]; then
+ retry check_health spark
+else
+ echo >&2 "ERROR: no test defined for ${ENVIRONMENT}"
+ exit 2
fi
EXIT_CODE=$?
diff --git a/etc/compose/accumulo/docker-compose.yml b/etc/compose/accumulo/docker-compose.yml
new file mode 100644
index 00000000..37913895
--- /dev/null
+++ b/etc/compose/accumulo/docker-compose.yml
@@ -0,0 +1,4 @@
+version: '2.0'
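+# bin/test.sh starts this single-service environment and waits for the image's HEALTHCHECK to report healthy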
+services:
+ accumulo:
+ image: testing/accumulo:latest
diff --git a/etc/compose/dns/docker-compose.yml b/etc/compose/dns/docker-compose.yml
new file mode 100644
index 00000000..1ee12f69
--- /dev/null
+++ b/etc/compose/dns/docker-compose.yml
@@ -0,0 +1,4 @@
+version: '2.0'
+services:
+ dns:
+ image: testing/dns:latest
diff --git a/etc/compose/openldap/docker-compose.yml b/etc/compose/openldap/docker-compose.yml
new file mode 100644
index 00000000..cf509aa2
--- /dev/null
+++ b/etc/compose/openldap/docker-compose.yml
@@ -0,0 +1,4 @@
+version: '2.0'
+services:
+ openldap:
+ image: testing/centos7-oj17-openldap:latest
diff --git a/etc/compose/spark3-delta/docker-compose.yml b/etc/compose/spark3-delta/docker-compose.yml
new file mode 100644
index 00000000..ec7e4923
--- /dev/null
+++ b/etc/compose/spark3-delta/docker-compose.yml
@@ -0,0 +1,4 @@
+version: '2.0'
+services:
+ spark:
+ image: testing/spark3-delta:latest
diff --git a/etc/compose/spark3-iceberg/docker-compose.yml b/etc/compose/spark3-iceberg/docker-compose.yml
new file mode 100644
index 00000000..7b01b0a8
--- /dev/null
+++ b/etc/compose/spark3-iceberg/docker-compose.yml
@@ -0,0 +1,4 @@
+version: '2.0'
+services:
+ spark:
+ image: testing/spark3-iceberg:latest
diff --git a/testing/centos7-oj11/Dockerfile b/testing/centos7-oj11/Dockerfile
new file mode 100644
index 00000000..67625cab
--- /dev/null
+++ b/testing/centos7-oj11/Dockerfile
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM library/centos:7
+
+COPY ./files /
+
+# Install Java and presto-admin dependencies
+RUN \
+ set -xeu && \
+ yum install -y \
+ nc \
+ wget \
+ && \
+ \
+ # Install Zulu JDK 11.0.16.1 \
+ rpm -i https://cdn.azul.com/zulu$([ "$(arch)" != "aarch64" ] || echo "-embedded")/bin/zulu11.58.23-ca-jdk11.0.16.1-linux."$(arch)".rpm && \
+ # Set JDK 11 as a default one
+ alternatives --set java /usr/lib/jvm/zulu-11/bin/java && \
+ alternatives --set javac /usr/lib/jvm/zulu-11/bin/javac && \
+ \
+ # install supervisor
+ yum --enablerepo=extras install -y setuptools epel-release && \
+ yum install -y python-pip && \
+ pip install supervisor && \
+ \
+ # install commonly needed packages
+ yum install -y \
+ less `# helpful when troubleshooting product tests` \
+ net-tools `# netstat is required by run_on_docker.sh` \
+ sudo \
+ telnet `# helpful when troubleshooting product tests` \
+ vim `# helpful when troubleshooting product tests` \
+ jq `# helpful json processing tool` \
+ && \
+ # cleanup
+ yum -y clean all && rm -rf /tmp/* /var/tmp/*
+
+ENV PATH="/usr/local/bin:${PATH}"
+ENV JAVA_HOME=/usr/lib/jvm/zulu-11
+ENV LANG=en_US.UTF-8
diff --git a/testing/centos7-oj11/files/opt/trinodev/site-override.xslt b/testing/centos7-oj11/files/opt/trinodev/site-override.xslt
new file mode 100644
index 00000000..af603ea8
--- /dev/null
+++ b/testing/centos7-oj11/files/opt/trinodev/site-override.xslt
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <!-- Merges a Hadoop-style site.xml with an overrides document; apply-site-xml-override passes the overrides path via the override-path param -->
+    <xsl:param name="override-path"/>
+    <xsl:output method="xml" indent="yes"/>
+
+    <!-- Identity transform: copy the input document unchanged by default -->
+    <xsl:template match="node()|@*">
+        <xsl:copy>
+            <xsl:apply-templates select="node()|@*"/>
+        </xsl:copy>
+    </xsl:template>
+
+    <!-- Drop properties that the overrides document redefines -->
+    <xsl:template match="property">
+        <xsl:if test="not(name = document($override-path)/configuration/property/name)">
+            <xsl:copy>
+                <xsl:apply-templates select="node()|@*"/>
+            </xsl:copy>
+        </xsl:if>
+    </xsl:template>
+
+    <!-- Append every override property at the end of the configuration -->
+    <xsl:template match="configuration">
+        <xsl:copy>
+            <xsl:apply-templates select="node()|@*"/>
+            <xsl:copy-of select="document($override-path)/configuration/property"/>
+        </xsl:copy>
+    </xsl:template>
+</xsl:stylesheet>
diff --git a/testing/centos7-oj11/files/usr/local/bin/apply-all-site-xml-overrides b/testing/centos7-oj11/files/usr/local/bin/apply-all-site-xml-overrides
new file mode 100755
index 00000000..dae984ae
--- /dev/null
+++ b/testing/centos7-oj11/files/usr/local/bin/apply-all-site-xml-overrides
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -euo pipefail
+
+fail() {
+ echo "$(basename "$0"): $*" >&2
+ exit 1
+}
+
+if [ $# -ne 1 ]; then
+ fail "Usage: $0 " >&2
+fi
+
+overrides_dir="$1"
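+# Each override's path below $overrides_dir mirrors the absolute path of the site file it patches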
+
+for file in $(find "$overrides_dir" -name '*.xml'); do
+ target_filename="${file#"$overrides_dir"}"
+ echo "Applying configuration override from $file to $target_filename"
+ apply-site-xml-override "$target_filename" "$file"
+done
diff --git a/testing/centos7-oj11/files/usr/local/bin/apply-site-xml-override b/testing/centos7-oj11/files/usr/local/bin/apply-site-xml-override
new file mode 100755
index 00000000..31e15de0
--- /dev/null
+++ b/testing/centos7-oj11/files/usr/local/bin/apply-site-xml-override
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -euo pipefail
+
+fail() {
+ echo "$(basename "$0"): $*" >&2
+ exit 1
+}
+
+if [ $# -ne 2 ]; then
+ fail "Usage: $0 " >&2
+fi
+
+site_xml="$1"
+overrides="$2"
+site_xml_new="$1.new"
+
+test -f "${site_xml}" || fail "${site_xml} does not exist or is not a file"
+test -f "${overrides}" || fail "${overrides} does not exist or is not a file"
+test ! -e "${site_xml_new}" || fail "${site_xml_new} already exists"
+
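+# Merge the overrides into the site file using the stylesheet baked into the image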
+xsltproc --param override-path "'${overrides}'" "/opt/trinodev/site-override.xslt" "${site_xml}" > "${site_xml_new}"
+cat "${site_xml_new}" > "${site_xml}" # Preserve file owner & permissions
+rm "${site_xml_new}"
diff --git a/testing/dns/Dockerfile b/testing/dns/Dockerfile
index e33f04db..a99bac05 100644
--- a/testing/dns/Dockerfile
+++ b/testing/dns/Dockerfile
@@ -10,7 +10,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM alpine:3.3
+FROM alpine:3.16
RUN apk --no-cache add dnsmasq
EXPOSE 53 53/udp
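+# Healthy once dnsmasq is listening on port 53 (netstat is provided by BusyBox in Alpine)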
+HEALTHCHECK --interval=10s --timeout=5s --start-period=1s \
+ CMD netstat -nltu | grep -q \:53
ENTRYPOINT ["dnsmasq", "-k"]
diff --git a/testing/phoenix5/Dockerfile b/testing/phoenix5/Dockerfile
index f66bfe13..5911836b 100644
--- a/testing/phoenix5/Dockerfile
+++ b/testing/phoenix5/Dockerfile
@@ -10,7 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM testing/centos7-oj17:unlabelled
+FROM testing/centos7-oj11:unlabelled
ARG HBASE_VERSION=2.4.7
ARG ZOOKEEPER_VERSION=3.5.7
diff --git a/testing/spark3-delta/Dockerfile b/testing/spark3-delta/Dockerfile
index 62a7c0b2..70bbe6ee 100644
--- a/testing/spark3-delta/Dockerfile
+++ b/testing/spark3-delta/Dockerfile
@@ -10,7 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM testing/centos7-oj17:unlabelled
+FROM testing/centos7-oj11:unlabelled
ARG SPARK_VERSION=3.2.1
ARG HADOOP_VERSION=3.2
@@ -44,6 +44,8 @@ ENV PATH="${SPARK_HOME}/bin:${PATH}"
EXPOSE 10213
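+# The resulting Health status is what bin/test.sh's check_health polls through docker compose ps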
+HEALTHCHECK --interval=10s --timeout=5s --start-period=10s \
+ CMD curl -f http://localhost:10213/
CMD spark-submit \
--master "local[*]" \
--class org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 \
diff --git a/testing/spark3-iceberg/Dockerfile b/testing/spark3-iceberg/Dockerfile
index 106c6a8e..c04b5095 100644
--- a/testing/spark3-iceberg/Dockerfile
+++ b/testing/spark3-iceberg/Dockerfile
@@ -10,7 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM testing/centos7-oj17:unlabelled
+FROM testing/centos7-oj11:unlabelled
ARG SPARK_VERSION=3.3.0
ARG HADOOP_VERSION=3
@@ -41,3 +41,12 @@ ENV PATH="${SPARK_HOME}/bin:${PATH}"
# add hive user needed in interactions with the Apache Hive environment
RUN useradd -ms /bin/bash hive
+
+HEALTHCHECK --interval=10s --timeout=5s --start-period=10s \
+ CMD curl -f http://localhost:10213/
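+# Run the same Thrift JDBC/ODBC server as spark3-delta so the health-checked port is actually served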
+CMD spark-submit \
+ --master "local[*]" \
+ --class org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 \
+ --name "Thrift JDBC/ODBC Server" \
+ --conf spark.hive.server2.thrift.port=10213 \
+ spark-internal