From cf92f842d817f26c716a833e28e1920db11ea0da Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 29 Mar 2022 10:06:57 +1100 Subject: [PATCH 01/16] feat: add support for configurable devevelopment and pr environment backups --- .../build-deploy-docker-compose.sh | 190 ++---------- .../scripts/exec-backup-generation.sh | 276 ++++++++++++++++++ 2 files changed, 295 insertions(+), 171 deletions(-) create mode 100644 images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh diff --git a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh index 8211bcce83..470265e24e 100755 --- a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh +++ b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh @@ -76,6 +76,23 @@ function featureFlag() { echo "${!defaultFlagVar}" } +function projectEnvironmentVariableCheck() { + # check for argument + [ "$1" ] || return + + local flagVar + + flagVar="$1" + # check Lagoon environment variables + flagValue=$(jq -r '.[] | select(.name == "'"$flagVar"'") | .value' <<<"$LAGOON_ENVIRONMENT_VARIABLES") + [ "$flagValue" ] && echo "$flagValue" && return + # check Lagoon project variables + flagValue=$(jq -r '.[] | select(.name == "'"$flagVar"'") | .value' <<<"$LAGOON_PROJECT_VARIABLES") + [ "$flagValue" ] && echo "$flagValue" && return + + echo "$2" +} + set +x SCC_CHECK=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get pod ${LAGOON_BUILD_NAME} -o json | jq -r '.metadata.annotations."openshift.io/scc" // false') set -x @@ -193,30 +210,6 @@ HELM_ARGUMENTS=() for CAPABILITIES in "${CAPABILITIES[@]}"; do HELM_ARGUMENTS+=(-a "${CAPABILITIES}") done - -# Implement global default values for backup retention periods -if [ -z "$MONTHLY_BACKUP_DEFAULT_RETENTION" ] -then - MONTHLY_BACKUP_DEFAULT_RETENTION=1 -fi -if [ -z "$WEEKLY_BACKUP_DEFAULT_RETENTION" ] -then - WEEKLY_BACKUP_DEFAULT_RETENTION=6 -fi -if [ -z "$DAILY_BACKUP_DEFAULT_RETENTION" ] -then - DAILY_BACKUP_DEFAULT_RETENTION=7 -fi -if [ -z "$HOURLY_BACKUP_DEFAULT_RETENTION" ] -then - HOURLY_BACKUP_DEFAULT_RETENTION=0 -fi - -# Implement global default value for backup schedule -if [ -z "$DEFAULT_BACKUP_SCHEDULE" ] -then - DEFAULT_BACKUP_SCHEDULE="M H(22-2) * * *" -fi set -x set +x # reduce noise in build logs @@ -1078,154 +1071,9 @@ patchBuildStep "${buildStartTime}" "${previousStepEnd}" "${currentStepEnd}" "${N previousStepEnd=${currentStepEnd} set -x -############################################## -### Backup Settings -############################################## - -# If k8up is supported by this cluster we create the schedule definition -if [[ "${CAPABILITIES[@]}" =~ "backup.appuio.ch/v1alpha1/Schedule" ]]; then - - # Parse out custom baas backup location variables - if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then - BAAS_CUSTOM_BACKUP_ENDPOINT=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_ENDPOINT") | "\(.value)"')) - BAAS_CUSTOM_BACKUP_BUCKET=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_BUCKET") | "\(.value)"')) - BAAS_CUSTOM_BACKUP_ACCESS_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_ACCESS_KEY") | "\(.value)"')) - BAAS_CUSTOM_BACKUP_SECRET_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_SECRET_KEY") | "\(.value)"')) - - if [ ! -z $BAAS_CUSTOM_BACKUP_ENDPOINT ] && [ ! -z $BAAS_CUSTOM_BACKUP_BUCKET ] && [ ! 
-z $BAAS_CUSTOM_BACKUP_ACCESS_KEY ] && [ ! -z $BAAS_CUSTOM_BACKUP_SECRET_KEY ]; then - CUSTOM_BAAS_BACKUP_ENABLED=1 - - HELM_CUSTOM_BAAS_BACKUP_ACCESS_KEY=${BAAS_CUSTOM_BACKUP_ACCESS_KEY} - HELM_CUSTOM_BAAS_BACKUP_SECRET_KEY=${BAAS_CUSTOM_BACKUP_SECRET_KEY} - else - set +x - kubectl --insecure-skip-tls-verify -n ${NAMESPACE} delete secret baas-custom-backup-credentials --ignore-not-found - set -x - fi - fi - - # Parse out custom baas restore location variables - if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then - BAAS_CUSTOM_RESTORE_ENDPOINT=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_ENDPOINT") | "\(.value)"')) - BAAS_CUSTOM_RESTORE_BUCKET=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_BUCKET") | "\(.value)"')) - BAAS_CUSTOM_RESTORE_ACCESS_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_ACCESS_KEY") | "\(.value)"')) - BAAS_CUSTOM_RESTORE_SECRET_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_SECRET_KEY") | "\(.value)"')) - - if [ ! -z $BAAS_CUSTOM_RESTORE_ENDPOINT ] && [ ! -z $BAAS_CUSTOM_RESTORE_BUCKET ] && [ ! -z $BAAS_CUSTOM_RESTORE_ACCESS_KEY ] && [ ! -z $BAAS_CUSTOM_RESTORE_SECRET_KEY ]; then - HELM_CUSTOM_BAAS_RESTORE_ACCESS_KEY=${BAAS_CUSTOM_RESTORE_ACCESS_KEY} - HELM_CUSTOM_BAAS_RESTORE_SECRET_KEY=${BAAS_CUSTOM_RESTORE_SECRET_KEY} - else - set +x - kubectl --insecure-skip-tls-verify -n ${NAMESPACE} delete secret baas-custom-restore-credentials --ignore-not-found - set -x - fi - fi - if ! kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get secret baas-repo-pw &> /dev/null; then - # Create baas-repo-pw secret based on the project secret - set +x - kubectl --insecure-skip-tls-verify -n ${NAMESPACE} create secret generic baas-repo-pw --from-literal=repo-pw=$(echo -n "$PROJECT_SECRET-BAAS-REPO-PW" | sha256sum | cut -d " " -f 1) - set -x - fi - - TEMPLATE_PARAMETERS=() - - set +x # reduce noise in build logs - # Check for custom baas bucket name - if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then - BAAS_BUCKET_NAME=$(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_BUCKET_NAME") | "\(.value)"') - fi - if [ -z $BAAS_BUCKET_NAME ]; then - BAAS_BUCKET_NAME=baas-${PROJECT} - fi - set -x - - # Pull in .lagoon.yml variables - PRODUCTION_MONTHLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.monthly "") - PRODUCTION_WEEKLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.weekly "") - PRODUCTION_DAILY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.daily "") - PRODUCTION_HOURLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.hourly "") - - # Set template parameters for retention values (prefer .lagoon.yml values over supplied defaults after ensuring they are valid integers via "-eq" comparison) - if [[ ! -z $PRODUCTION_MONTHLY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_MONTHLY_BACKUP_RETENTION" -eq "$PRODUCTION_MONTHLY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then - MONTHLY_BACKUP_RETENTION=${PRODUCTION_MONTHLY_BACKUP_RETENTION} - else - MONTHLY_BACKUP_RETENTION=${MONTHLY_BACKUP_DEFAULT_RETENTION} - fi - if [[ ! 
-z $PRODUCTION_WEEKLY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_WEEKLY_BACKUP_RETENTION" -eq "$PRODUCTION_WEEKLY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then - WEEKLY_BACKUP_RETENTION=${PRODUCTION_WEEKLY_BACKUP_RETENTION} - else - WEEKLY_BACKUP_RETENTION=${WEEKLY_BACKUP_DEFAULT_RETENTION} - fi - if [[ ! -z $PRODUCTION_DAILY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_DAILY_BACKUP_RETENTION" -eq "$PRODUCTION_DAILY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then - DAILY_BACKUP_RETENTION=${PRODUCTION_DAILY_BACKUP_RETENTION} - else - DAILY_BACKUP_RETENTION=${DAILY_BACKUP_DEFAULT_RETENTION} - fi - if [[ ! -z $PRODUCTION_HOURLY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_HOURLY_BACKUP_RETENTION" -eq "$PRODUCTION_HOURLY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then - HOURLY_BACKUP_RETENTION=${PRODUCTION_HOURLY_BACKUP_RETENTION} - else - HOURLY_BACKUP_RETENTION=${HOURLY_BACKUP_DEFAULT_RETENTION} - fi - - # Set template parameters for backup schedule value (prefer .lagoon.yml values over supplied defaults after ensuring they are valid) - PRODUCTION_BACKUP_SCHEDULE=$(cat .lagoon.yml | shyaml get-value backup-schedule.production "") - - if [[ ! -z $PRODUCTION_BACKUP_SCHEDULE ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then - if [[ "$PRODUCTION_BACKUP_SCHEDULE" =~ ^M\ ]]; then - BACKUP_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "${PRODUCTION_BACKUP_SCHEDULE}") - else - echo "Error parsing custom backup schedule: '$PRODUCTION_BACKUP_SCHEDULE'"; exit 1 - fi - else - BACKUP_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "${DEFAULT_BACKUP_SCHEDULE}") - fi - - if [ ! -z $K8UP_WEEKLY_RANDOM_FEATURE_FLAG ] && [ $K8UP_WEEKLY_RANDOM_FEATURE_FLAG = 'enabled' ]; then - # Let the controller deduplicate checks (will run weekly at a random time throughout the week) - CHECK_SCHEDULE="@weekly-random" - else - # Run Checks on Sunday at 0300-0600 - CHECK_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 0") - fi - - if [ ! -z $K8UP_WEEKLY_RANDOM_FEATURE_FLAG ] && [ $K8UP_WEEKLY_RANDOM_FEATURE_FLAG = 'enabled' ]; then - # Let the controller deduplicate prunes (will run weekly at a random time throughout the week) - PRUNE_SCHEDULE="@weekly-random" - else - # Run Prune on Saturday at 0300-0600 - PRUNE_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 6") - fi - - # Set the S3 variables which should be passed to the helm chart - if [ ! 
-z $CUSTOM_BAAS_BACKUP_ENABLED ]; then - BAAS_BACKUP_ENDPOINT=${BAAS_CUSTOM_BACKUP_ENDPOINT} - BAAS_BACKUP_BUCKET=${BAAS_CUSTOM_BACKUP_BUCKET} - BAAS_BACKUP_SECRET_NAME='lagoon-baas-custom-backup-credentials' - else - BAAS_BACKUP_ENDPOINT='' - BAAS_BACKUP_BUCKET=${BAAS_BUCKET_NAME} - BAAS_BACKUP_SECRET_NAME='' - fi - - OPENSHIFT_TEMPLATE="/kubectl-build-deploy/openshift-templates/backup-schedule.yml" - helm template k8up-lagoon-backup-schedule /kubectl-build-deploy/helmcharts/k8up-schedule \ - -f /kubectl-build-deploy/values.yaml \ - --set backup.schedule="${BACKUP_SCHEDULE}" \ - --set check.schedule="${CHECK_SCHEDULE}" \ - --set prune.schedule="${PRUNE_SCHEDULE}" \ - --set prune.retention.keepMonthly=${MONTHLY_BACKUP_RETENTION} \ - --set prune.retention.keepWeekly=${WEEKLY_BACKUP_RETENTION} \ - --set prune.retention.keepDaily=${DAILY_BACKUP_RETENTION} \ - --set prune.retention.keepHourly=${HOURLY_BACKUP_RETENTION} \ - --set s3.endpoint="${BAAS_BACKUP_ENDPOINT}" \ - --set s3.bucket="${BAAS_BACKUP_BUCKET}" \ - --set s3.secretName="${BAAS_BACKUP_SECRET_NAME}" \ - --set customRestoreLocation.accessKey="${BAAS_CUSTOM_RESTORE_ACCESS_KEY}" \ - --set customRestoreLocation.secretKey="${BAAS_CUSTOM_RESTORE_SECRET_KEY}" \ - --set customBackupLocation.accessKey="${BAAS_CUSTOM_BACKUP_ACCESS_KEY}" \ - --set customBackupLocation.secretKey="${BAAS_CUSTOM_BACKUP_SECRET_KEY}" "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/k8up-lagoon-backup-schedule.yaml -fi +# Run the backup generation script +. /kubectl-build-deploy/scripts/exec-backup-generation.sh # check for ISOLATION_NETWORK_POLICY feature flag, disabled by default set +x diff --git a/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh b/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh new file mode 100644 index 0000000000..2adf93f9d6 --- /dev/null +++ b/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +set +x +############################################## +# it is possible to override the retention using a variable defined in the api +# +# if you want to use a different retention period for development branches, you can use the following +# LAGOON_BACKUP_DEV_RETENTION="H:D:W:M" +# +# if you want to use a different retention period for pullrequest environments, you can use the following +# LAGOON_BACKUP_PR_RETENTION="H:D:W:M" +# +# Where the value H:D:W:M (hourly:daily:weekly:monthly) is numbers representing the retention period +# eg: 0:7:6:1 +# 0 Hourly +# 7 Daily +# 6 Weekly +# 1 Montly +# +############################################## + +# check if a specific override has been defined in the api +case "$BUILD_TYPE" in + branch) + if [ "${ENVIRONMENT_TYPE}" == "development" ]; then + # check if the API defined variable LAGOON_BACKUP_DEV_RETENTION contains what is needed + # if one in the API is not defined, fall back to what could be injected by the controller LAGOON_FEATURE_BACKUP_DEV_RETENTION + BACKUP_RETENTION=$(projectEnvironmentVariableCheck LAGOON_BACKUP_DEV_RETENTION "${LAGOON_FEATURE_BACKUP_DEV_RETENTION}") + if [ ! 
-z "$BACKUP_RETENTION" ]; then + IFS=':' read -ra BACKUP_RETENTION_SPLIT <<< "$BACKUP_RETENTION" + HOURLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[0]} + DAILY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[1]} + WEEKLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[2]} + MONTHLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[3]} + fi + fi + ;; + pullrequest) + # check if the API defined variable LAGOON_BACKUP_PR_RETENTION contains what is needed + # if one in the API is not defined, fall back to what could be injected by the controller LAGOON_FEATURE_BACKUP_PR_RETENTION + BACKUP_RETENTION=$(projectEnvironmentVariableCheck LAGOON_BACKUP_PR_RETENTION "${LAGOON_FEATURE_BACKUP_PR_RETENTION}") + if [ ! -z "$BACKUP_RETENTION" ]; then + IFS=':' read -ra BACKUP_RETENTION_SPLIT <<< "$BACKUP_RETENTION" + HOURLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[0]} + DAILY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[1]} + WEEKLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[2]} + MONTHLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[3]} + fi + if [ -z "$BACKUP_RETENTION" ];then + ## fall back to dev retention if no pr retention is defined + # check if the API defined variable LAGOON_BACKUP_DEV_RETENTION contains what is needed + # if one in the API is not defined, fall back to what could be injected by the controller LAGOON_FEATURE_BACKUP_DEV_RETENTION + BACKUP_RETENTION=$(projectEnvironmentVariableCheck LAGOON_BACKUP_DEV_RETENTION "${LAGOON_FEATURE_BACKUP_DEV_RETENTION}") + if [ ! -z "$BACKUP_RETENTION" ]; then + IFS=':' read -ra BACKUP_RETENTION_SPLIT <<< "$BACKUP_RETENTION" + HOURLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[0]} + DAILY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[1]} + WEEKLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[2]} + MONTHLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[3]} + fi + fi + ;; + *) + echo "${BUILD_TYPE} not implemented"; exit 1; +esac + +# Implement global default value for backup retentions +if [ -z "$MONTHLY_BACKUP_DEFAULT_RETENTION" ] +then + MONTHLY_BACKUP_DEFAULT_RETENTION=1 +fi +if [ -z "$WEEKLY_BACKUP_DEFAULT_RETENTION" ] +then + WEEKLY_BACKUP_DEFAULT_RETENTION=6 +fi +if [ -z "$DAILY_BACKUP_DEFAULT_RETENTION" ] +then + DAILY_BACKUP_DEFAULT_RETENTION=7 +fi +if [ -z "$HOURLY_BACKUP_DEFAULT_RETENTION" ] +then + HOURLY_BACKUP_DEFAULT_RETENTION=0 +fi + +############################################## +# it is possible to override the schedule using a variable defined in the api +# +# if you want to use a different schedule period for development branches, you can use the following +# LAGOON_BACKUP_DEV_SCHEDULE="M H(22-2) * * *" +# +# if you want to use a different retention period for pullrequest environments, you can use the following +# LAGOON_BACKUP_PR_SCHEDULE="M H(22-2) * * *" +# +# Where the value is a supported cronjob pattern for k8up +############################################## + + +# check if a specific override has been defined in the api +case "$BUILD_TYPE" in + branch) + if [ "${ENVIRONMENT_TYPE}" == "development" ]; then + # check if the API defined variable LAGOON_BACKUP_DEV_SCHEDULE contains what is needed + # if one in the API is not defined, fall back to what could be injected by the controller LAGOON_FEATURE_BACKUP_DEV_SCHEDULE + DEFAULT_BACKUP_SCHEDULE=$(projectEnvironmentVariableCheck LAGOON_BACKUP_DEV_SCHEDULE "${LAGOON_FEATURE_BACKUP_DEV_SCHEDULE}") + fi + ;; + pullrequest) + # check if the API defined variable LAGOON_BACKUP_PR_SCHEDULE contains what is needed + # if one in the 
API is not defined, fall back to what could be injected by the controller LAGOON_FEATURE_BACKUP_PR_SCHEDULE + DEFAULT_BACKUP_SCHEDULE=$(projectEnvironmentVariableCheck LAGOON_BACKUP_PR_SCHEDULE "${LAGOON_FEATURE_BACKUP_PR_SCHEDULE}") + if [ -z "$DEFAULT_BACKUP_SCHEDULE" ];then + ## fall back to dev schedule if no pr schedule is defined + # check if the API defined variable LAGOON_BACKUP_DEV_SCHEDULE contains what is needed + # if one in the API is not defined, fall back to what could be injected by the controller LAGOON_FEATURE_BACKUP_DEV_SCHEDULE + DEFAULT_BACKUP_SCHEDULE=$(projectEnvironmentVariableCheck LAGOON_BACKUP_DEV_SCHEDULE "${LAGOON_FEATURE_BACKUP_DEV_SCHEDULE}") + fi + ;; + *) + echo "${BUILD_TYPE} not implemented"; exit 1; +esac + +# Implement global default value for backup schedule +if [ -z "$DEFAULT_BACKUP_SCHEDULE" ] +then + DEFAULT_BACKUP_SCHEDULE="M H(22-2) * * *" +fi +set -x + +############################################## +### Backup Settings +############################################## + +# If k8up is supported by this cluster we create the schedule definition +if [[ "${CAPABILITIES[@]}" =~ "backup.appuio.ch/v1alpha1/Schedule" ]]; then + + # Parse out custom baas backup location variables + if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then + BAAS_CUSTOM_BACKUP_ENDPOINT=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_ENDPOINT") | "\(.value)"')) + BAAS_CUSTOM_BACKUP_BUCKET=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_BUCKET") | "\(.value)"')) + BAAS_CUSTOM_BACKUP_ACCESS_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_ACCESS_KEY") | "\(.value)"')) + BAAS_CUSTOM_BACKUP_SECRET_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_BACKUP_SECRET_KEY") | "\(.value)"')) + + if [ ! -z $BAAS_CUSTOM_BACKUP_ENDPOINT ] && [ ! -z $BAAS_CUSTOM_BACKUP_BUCKET ] && [ ! -z $BAAS_CUSTOM_BACKUP_ACCESS_KEY ] && [ ! -z $BAAS_CUSTOM_BACKUP_SECRET_KEY ]; then + CUSTOM_BAAS_BACKUP_ENABLED=1 + + HELM_CUSTOM_BAAS_BACKUP_ACCESS_KEY=${BAAS_CUSTOM_BACKUP_ACCESS_KEY} + HELM_CUSTOM_BAAS_BACKUP_SECRET_KEY=${BAAS_CUSTOM_BACKUP_SECRET_KEY} + else + set +x + kubectl --insecure-skip-tls-verify -n ${NAMESPACE} delete secret baas-custom-backup-credentials --ignore-not-found + set -x + fi + fi + + # Parse out custom baas restore location variables + if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then + BAAS_CUSTOM_RESTORE_ENDPOINT=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_ENDPOINT") | "\(.value)"')) + BAAS_CUSTOM_RESTORE_BUCKET=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_BUCKET") | "\(.value)"')) + BAAS_CUSTOM_RESTORE_ACCESS_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_ACCESS_KEY") | "\(.value)"')) + BAAS_CUSTOM_RESTORE_SECRET_KEY=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_CUSTOM_RESTORE_SECRET_KEY") | "\(.value)"')) + + if [ ! -z $BAAS_CUSTOM_RESTORE_ENDPOINT ] && [ ! -z $BAAS_CUSTOM_RESTORE_BUCKET ] && [ ! -z $BAAS_CUSTOM_RESTORE_ACCESS_KEY ] && [ ! 
-z $BAAS_CUSTOM_RESTORE_SECRET_KEY ]; then + HELM_CUSTOM_BAAS_RESTORE_ACCESS_KEY=${BAAS_CUSTOM_RESTORE_ACCESS_KEY} + HELM_CUSTOM_BAAS_RESTORE_SECRET_KEY=${BAAS_CUSTOM_RESTORE_SECRET_KEY} + else + set +x + kubectl --insecure-skip-tls-verify -n ${NAMESPACE} delete secret baas-custom-restore-credentials --ignore-not-found + set -x + fi + fi + + if ! kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get secret baas-repo-pw &> /dev/null; then + # Create baas-repo-pw secret based on the project secret + set +x + kubectl --insecure-skip-tls-verify -n ${NAMESPACE} create secret generic baas-repo-pw --from-literal=repo-pw=$(echo -n "$PROJECT_SECRET-BAAS-REPO-PW" | sha256sum | cut -d " " -f 1) + set -x + fi + + TEMPLATE_PARAMETERS=() + + set +x # reduce noise in build logs + # Check for custom baas bucket name + if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then + BAAS_BUCKET_NAME=$(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_BUCKET_NAME") | "\(.value)"') + fi + if [ -z $BAAS_BUCKET_NAME ]; then + BAAS_BUCKET_NAME=baas-${PROJECT} + fi + set -x + + # Pull in .lagoon.yml variables + PRODUCTION_MONTHLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.monthly "") + PRODUCTION_WEEKLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.weekly "") + PRODUCTION_DAILY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.daily "") + PRODUCTION_HOURLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.hourly "") + + # Set template parameters for retention values (prefer .lagoon.yml values over supplied defaults after ensuring they are valid integers via "-eq" comparison) + if [[ ! -z $PRODUCTION_MONTHLY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_MONTHLY_BACKUP_RETENTION" -eq "$PRODUCTION_MONTHLY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then + MONTHLY_BACKUP_RETENTION=${PRODUCTION_MONTHLY_BACKUP_RETENTION} + else + MONTHLY_BACKUP_RETENTION=${MONTHLY_BACKUP_DEFAULT_RETENTION} + fi + if [[ ! -z $PRODUCTION_WEEKLY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_WEEKLY_BACKUP_RETENTION" -eq "$PRODUCTION_WEEKLY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then + WEEKLY_BACKUP_RETENTION=${PRODUCTION_WEEKLY_BACKUP_RETENTION} + else + WEEKLY_BACKUP_RETENTION=${WEEKLY_BACKUP_DEFAULT_RETENTION} + fi + if [[ ! -z $PRODUCTION_DAILY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_DAILY_BACKUP_RETENTION" -eq "$PRODUCTION_DAILY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then + DAILY_BACKUP_RETENTION=${PRODUCTION_DAILY_BACKUP_RETENTION} + else + DAILY_BACKUP_RETENTION=${DAILY_BACKUP_DEFAULT_RETENTION} + fi + if [[ ! -z $PRODUCTION_HOURLY_BACKUP_RETENTION ]] && [[ "$PRODUCTION_HOURLY_BACKUP_RETENTION" -eq "$PRODUCTION_HOURLY_BACKUP_RETENTION" ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then + HOURLY_BACKUP_RETENTION=${PRODUCTION_HOURLY_BACKUP_RETENTION} + else + HOURLY_BACKUP_RETENTION=${HOURLY_BACKUP_DEFAULT_RETENTION} + fi + + # Set template parameters for backup schedule value (prefer .lagoon.yml values over supplied defaults after ensuring they are valid) + PRODUCTION_BACKUP_SCHEDULE=$(cat .lagoon.yml | shyaml get-value backup-schedule.production "") + + if [[ ! 
-z $PRODUCTION_BACKUP_SCHEDULE ]] && [[ $ENVIRONMENT_TYPE = 'production' ]]; then + if [[ "$PRODUCTION_BACKUP_SCHEDULE" =~ ^M\ ]]; then + BACKUP_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "${PRODUCTION_BACKUP_SCHEDULE}") + else + echo "Error parsing custom backup schedule: '$PRODUCTION_BACKUP_SCHEDULE'"; exit 1 + fi + else + BACKUP_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "${DEFAULT_BACKUP_SCHEDULE}") + fi + + if [ ! -z $K8UP_WEEKLY_RANDOM_FEATURE_FLAG ] && [ $K8UP_WEEKLY_RANDOM_FEATURE_FLAG = 'enabled' ]; then + # Let the controller deduplicate checks (will run weekly at a random time throughout the week) + CHECK_SCHEDULE="@weekly-random" + else + # Run Checks on Sunday at 0300-0600 + CHECK_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 0") + fi + + if [ ! -z $K8UP_WEEKLY_RANDOM_FEATURE_FLAG ] && [ $K8UP_WEEKLY_RANDOM_FEATURE_FLAG = 'enabled' ]; then + # Let the controller deduplicate prunes (will run weekly at a random time throughout the week) + PRUNE_SCHEDULE="@weekly-random" + else + # Run Prune on Saturday at 0300-0600 + PRUNE_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 6") + fi + + # Set the S3 variables which should be passed to the helm chart + if [ ! -z $CUSTOM_BAAS_BACKUP_ENABLED ]; then + BAAS_BACKUP_ENDPOINT=${BAAS_CUSTOM_BACKUP_ENDPOINT} + BAAS_BACKUP_BUCKET=${BAAS_CUSTOM_BACKUP_BUCKET} + BAAS_BACKUP_SECRET_NAME='lagoon-baas-custom-backup-credentials' + else + BAAS_BACKUP_ENDPOINT='' + BAAS_BACKUP_BUCKET=${BAAS_BUCKET_NAME} + BAAS_BACKUP_SECRET_NAME='' + fi + + OPENSHIFT_TEMPLATE="/kubectl-build-deploy/openshift-templates/backup-schedule.yml" + helm template k8up-lagoon-backup-schedule /kubectl-build-deploy/helmcharts/k8up-schedule \ + -f /kubectl-build-deploy/values.yaml \ + --set backup.schedule="${BACKUP_SCHEDULE}" \ + --set check.schedule="${CHECK_SCHEDULE}" \ + --set prune.schedule="${PRUNE_SCHEDULE}" \ + --set prune.retention.keepMonthly=${MONTHLY_BACKUP_RETENTION} \ + --set prune.retention.keepWeekly=${WEEKLY_BACKUP_RETENTION} \ + --set prune.retention.keepDaily=${DAILY_BACKUP_RETENTION} \ + --set prune.retention.keepHourly=${HOURLY_BACKUP_RETENTION} \ + --set s3.endpoint="${BAAS_BACKUP_ENDPOINT}" \ + --set s3.bucket="${BAAS_BACKUP_BUCKET}" \ + --set s3.secretName="${BAAS_BACKUP_SECRET_NAME}" \ + --set customRestoreLocation.accessKey="${BAAS_CUSTOM_RESTORE_ACCESS_KEY}" \ + --set customRestoreLocation.secretKey="${BAAS_CUSTOM_RESTORE_SECRET_KEY}" \ + --set customBackupLocation.accessKey="${BAAS_CUSTOM_BACKUP_ACCESS_KEY}" \ + --set customBackupLocation.secretKey="${BAAS_CUSTOM_BACKUP_SECRET_KEY}" "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/k8up-lagoon-backup-schedule.yaml +fi \ No newline at end of file From 32fbbc0c689a00cbb3ec4e19505094e3d8ff6e8e Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 29 Mar 2022 12:57:32 +1100 Subject: [PATCH 02/16] chore: make script executable --- .../kubectl-build-deploy-dind/scripts/exec-backup-generation.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh diff --git a/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh b/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh old mode 100644 new mode 100755 From 4cbf4c63c8f0adbd876fac4672635fe95c3a10c1 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 29 Mar 2022 14:13:32 +1100 Subject: [PATCH 03/16] fix: 
add promote --- .../scripts/exec-backup-generation.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh b/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh index 2adf93f9d6..1302a48a03 100755 --- a/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh +++ b/images/kubectl-build-deploy-dind/scripts/exec-backup-generation.sh @@ -21,6 +21,8 @@ set +x # check if a specific override has been defined in the api case "$BUILD_TYPE" in + promote) + ;; branch) if [ "${ENVIRONMENT_TYPE}" == "development" ]; then # check if the API defined variable LAGOON_BACKUP_DEV_RETENTION contains what is needed @@ -97,6 +99,8 @@ fi # check if a specific override has been defined in the api case "$BUILD_TYPE" in + promote) + ;; branch) if [ "${ENVIRONMENT_TYPE}" == "development" ]; then # check if the API defined variable LAGOON_BACKUP_DEV_SCHEDULE contains what is needed From 47f144cfb62a7df7ac5937aa8daf6b19788b1de8 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Mon, 11 Apr 2022 13:56:31 +1000 Subject: [PATCH 04/16] chore: add task name generator --- node-packages/commons/src/util.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/node-packages/commons/src/util.ts b/node-packages/commons/src/util.ts index bbc7f8ed28..704e6fcb2a 100644 --- a/node-packages/commons/src/util.ts +++ b/node-packages/commons/src/util.ts @@ -5,6 +5,10 @@ export const generateBuildId = function() { return `lagoon-build-${Math.random().toString(36).substring(7)}`; }; +export const generateTaskName = function() { + return `lagoon-task-${Math.random().toString(36).substring(7)}`; +}; + export const jsonMerge = function(a, b, prop) { var reduced = a.filter(function(aitem) { return !b.find(function(bitem) { From c098159436506d06cbe275181b60fe98cceba2e8 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 12 Apr 2022 08:27:28 +1000 Subject: [PATCH 05/16] feat: add taskname field to tasks --- .../docker-entrypoint-initdb.d/00-tables.sql | 3 +- .../01-migrations.sql | 94 +++++++++++++++++++ services/api/src/resolvers.js | 8 ++ services/api/src/resources/task/helpers.ts | 6 ++ services/api/src/resources/task/resolvers.ts | 42 ++++++++- services/api/src/resources/task/sql.ts | 3 + .../task/task_definition_resolvers.ts | 3 + services/api/src/typeDefs.js | 12 ++- 8 files changed, 168 insertions(+), 3 deletions(-) diff --git a/services/api-db/docker-entrypoint-initdb.d/00-tables.sql b/services/api-db/docker-entrypoint-initdb.d/00-tables.sql index 3fdec7f4de..352087fa48 100644 --- a/services/api-db/docker-entrypoint-initdb.d/00-tables.sql +++ b/services/api-db/docker-entrypoint-initdb.d/00-tables.sql @@ -183,10 +183,11 @@ CREATE TABLE IF NOT EXISTS environment_service ( CREATE TABLE IF NOT EXISTS task ( id int NOT NULL auto_increment PRIMARY KEY, name varchar(100) NOT NULL, + task_name varchar(100) NULL, environment int NOT NULL REFERENCES environment (id), service varchar(100) NOT NULL, command varchar(300) NOT NULL, - status ENUM('active', 'succeeded', 'failed') NOT NULL, + status ENUM('new', 'pending', 'running', 'cancelled', 'error', 'failed', 'complete', 'active', 'succeeded') NOT NULL, created datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, started datetime NULL, completed datetime NULL, diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index 5d38457a6d..c874ffcd8f 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ 
b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -1631,6 +1631,97 @@ CREATE OR REPLACE PROCEDURE END; $$ +CREATE OR REPLACE PROCEDURE + add_task_name_to_tasks() + + BEGIN + IF NOT EXISTS ( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE + table_name = 'task' + AND table_schema = 'infrastructure' + AND column_name = 'task_name' + ) THEN + ALTER TABLE `task` + ADD `task_name` varchar(100) NULL; + END IF; + END; +$$ + +CREATE OR REPLACE PROCEDURE + add_new_task_status_types() + + BEGIN + IF NOT EXISTS ( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE + table_name = 'task' + AND table_schema = 'infrastructure' + AND column_name = 'status' + AND column_type like '%''cancelled%' + ) THEN + ALTER TABLE `task` + MODIFY status ENUM('new', 'pending', 'running', 'cancelled', 'error', 'failed', 'complete', 'active', 'succeeded') NOT NULL; + END IF; + END; +$$ + +-- update any active or succeeded statuses to be running or complete +CREATE OR REPLACE PROCEDURE + update_active_succeeded_tasks() + + BEGIN + UPDATE task t + SET + t.status = 'running' + WHERE + t.task_name = 'active'; + UPDATE task t + SET + t.status = 'complete' + WHERE + t.task_name = 'succeeded'; + END; +$$ + +-- generate a taskname for tasks missing one +CREATE OR REPLACE PROCEDURE + update_missing_tasknames() + + BEGIN + UPDATE task t + SET + t.task_name = CONCAT('lagoon-task-', (SELECT LEFT(UUID(), 6))) + WHERE + t.task_name IS NULL; +END WHILE; + + + END; +$$ + +-- TODO: Eventually the `active/succeeded` values should go away once `remote-controller` is updated to send the correct values +-- CREATE OR REPLACE PROCEDURE +-- remove_active_succeeded_task_types() + +-- BEGIN +-- IF NOT EXISTS ( +-- SELECT NULL +-- FROM INFORMATION_SCHEMA.COLUMNS +-- WHERE +-- table_name = 'task' +-- AND table_schema = 'infrastructure' +-- AND column_name = 'status' +-- AND column_type like '%''cancelled%' +-- ) THEN +-- ALTER TABLE `task` +-- MODIFY status ENUM('new', 'pending', 'running', 'cancelled', 'error', 'failed', 'complete') NOT NULL; +-- END IF; +-- END; +-- $$ + DELIMITER ; -- If adding new procedures, add them to the bottom of this list @@ -1715,6 +1806,9 @@ CALL change_name_index_for_advanced_task_argument(); CALL add_confirmation_text_to_advanced_task_def(); CALL add_display_name_to_advanced_task_argument(); CALL add_ecdsa_ssh_key_types(); +CALL add_new_task_status_types(); +CALL update_active_succeeded_tasks(); +CALL update_missing_tasknames(); -- Drop legacy SSH key procedures DROP PROCEDURE IF EXISTS CreateProjectSshKey; diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index 7f27b33ebc..14ed9033f1 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -54,6 +54,7 @@ const { const { getTasksByEnvironmentId, + getTaskByTaskName, getTaskByRemoteId, getTaskById, addTask, @@ -306,6 +307,12 @@ const resolvers = { PROBLEM: 'problem', }, TaskStatusType: { + NEW: 'new', + PENDING: 'pending', + RUNNING: 'running', + CANCELLED: 'cancelled', + ERROR: 'error', + COMPLETE: 'complete', ACTIVE: 'active', SUCCEEDED: 'succeeded', FAILED: 'failed', @@ -432,6 +439,7 @@ const resolvers = { userCanSshToEnvironment, deploymentByRemoteId: getDeploymentByRemoteId, deploymentsByBulkId: getDeploymentsByBulkId, + taskByTaskName: getTaskByTaskName, taskByRemoteId: getTaskByRemoteId, taskById: getTaskById, advancedTaskDefinitionById, diff --git a/services/api/src/resources/task/helpers.ts b/services/api/src/resources/task/helpers.ts index 919e92ba4a..5cea9ac332 100644 --- 
a/services/api/src/resources/task/helpers.ts +++ b/services/api/src/resources/task/helpers.ts @@ -14,6 +14,7 @@ export const Helpers = (sqlClientPool: Pool) => ({ addTask: async ({ id, name, + taskName, status, created, started, @@ -26,6 +27,7 @@ export const Helpers = (sqlClientPool: Pool) => ({ }: { id?: number; name: string; + taskName: string; status?: string; created?: string; started?: string; @@ -41,6 +43,7 @@ export const Helpers = (sqlClientPool: Pool) => ({ Sql.insertTask({ id, name, + taskName, status, created, started, @@ -99,6 +102,7 @@ export const Helpers = (sqlClientPool: Pool) => ({ { id, name, + taskName, status, created, started, @@ -112,6 +116,7 @@ export const Helpers = (sqlClientPool: Pool) => ({ }: { id?: number, name: string, + taskName: string, status?: string, created?: string, started?: string, @@ -142,6 +147,7 @@ export const Helpers = (sqlClientPool: Pool) => ({ Sql.insertTask({ id, name, + taskName, status, created, started, diff --git a/services/api/src/resources/task/resolvers.ts b/services/api/src/resources/task/resolvers.ts index 7ebdf870c2..4d60eb466c 100644 --- a/services/api/src/resources/task/resolvers.ts +++ b/services/api/src/resources/task/resolvers.ts @@ -13,6 +13,7 @@ import { Helpers as projectHelpers } from '../project/helpers'; import { Validators as envValidators } from '../environment/validators'; import S3 from 'aws-sdk/clients/s3'; import sha1 from 'sha1'; +import { generateTaskName } from '@lagoon/commons/dist/util'; const accessKeyId = process.env.S3_FILES_ACCESS_KEY_ID || 'minio' const secretAccessKey = process.env.S3_FILES_SECRET_ACCESS_KEY || 'minio123' @@ -97,7 +98,7 @@ export const getTaskLog: ResolverFn = async ( export const getTasksByEnvironmentId: ResolverFn = async ( { id: eid }, - { id: filterId, limit }, + { id: filterId, taskName: taskName, limit }, { sqlClientPool, hasPermission } ) => { const environment = await environmentHelpers( @@ -116,6 +117,10 @@ export const getTasksByEnvironmentId: ResolverFn = async ( queryBuilder = queryBuilder.andWhere('id', filterId); } + if (taskName) { + queryBuilder = queryBuilder.andWhere('task_name', taskName); + } + if (limit) { queryBuilder = queryBuilder.limit(limit); } @@ -123,6 +128,30 @@ export const getTasksByEnvironmentId: ResolverFn = async ( return query(sqlClientPool, queryBuilder.toString()); }; +export const getTaskByTaskName: ResolverFn = async ( + root, + { taskName }, + { sqlClientPool, hasPermission } +) => { + const queryString = knex('task') + .where('task_name', '=', taskName) + .toString(); + + const rows = await query(sqlClientPool, queryString); + const task = R.prop(0, rows); + + if (!task) { + return null; + } + + const rowsPerms = await query(sqlClientPool, Sql.selectPermsForTask(task.id)); + await hasPermission('task', 'view', { + project: R.path(['0', 'pid'], rowsPerms) + }); + + return task; +}; + export const getTaskByRemoteId: ResolverFn = async ( root, { id }, @@ -208,6 +237,8 @@ export const addTask: ResolverFn = async ( execute = true; } + let taskName = generateTaskName() + userActivityLogger(`User added task '${name}'`, { project: '', event: 'api:addTask', @@ -215,6 +246,7 @@ export const addTask: ResolverFn = async ( input: { id, name, + taskName, status, created, started, @@ -231,6 +263,7 @@ export const addTask: ResolverFn = async ( const taskData = await Helpers(sqlClientPool).addTask({ id, name, + taskName, status, created, started, @@ -393,6 +426,7 @@ TOKEN="$(ssh -p $TASK_SSH_PORT -t lagoon@$TASK_SSH_HOST token)" && curl -sS "$TA const taskData = 
await Helpers(sqlClientPool).addTask({ name: 'Drush archive-dump', + taskName: generateTaskName(), environment: environmentId, service: 'cli', command, @@ -441,6 +475,7 @@ TOKEN="$(ssh -p $TASK_SSH_PORT -t lagoon@$TASK_SSH_HOST token)" && curl -sS "$TA const taskData = await Helpers(sqlClientPool).addTask({ name: 'Drush sql-dump', + taskName: generateTaskName(), environment: environmentId, service: 'cli', command, @@ -491,6 +526,7 @@ export const taskDrushCacheClear: ResolverFn = async ( const taskData = await Helpers(sqlClientPool).addTask({ name: 'Drush cache-clear', + taskName: generateTaskName(), environment: environmentId, service: 'cli', command, @@ -530,6 +566,7 @@ export const taskDrushCron: ResolverFn = async ( const taskData = await Helpers(sqlClientPool).addTask({ name: 'Drush cron', + taskName: generateTaskName(), environment: environmentId, service: 'cli', command: `drush cron`, @@ -601,6 +638,7 @@ export const taskDrushSqlSync: ResolverFn = async ( const taskData = await Helpers(sqlClientPool).addTask({ name: `Sync DB ${sourceEnvironment.name} -> ${destinationEnvironment.name}`, + taskName: generateTaskName(), environment: destinationEnvironmentId, service: 'cli', command: command, @@ -672,6 +710,7 @@ export const taskDrushRsyncFiles: ResolverFn = async ( const taskData = await Helpers(sqlClientPool).addTask({ name: `Sync files ${sourceEnvironment.name} -> ${destinationEnvironment.name}`, + taskName: generateTaskName(), environment: destinationEnvironmentId, service: 'cli', command: command, @@ -711,6 +750,7 @@ export const taskDrushUserLogin: ResolverFn = async ( const taskData = await Helpers(sqlClientPool).addTask({ name: 'Drush uli', + taskName: generateTaskName(), environment: environmentId, service: 'cli', command: `drush uli`, diff --git a/services/api/src/resources/task/sql.ts b/services/api/src/resources/task/sql.ts index fd5358cc76..ee1c3d2a03 100644 --- a/services/api/src/resources/task/sql.ts +++ b/services/api/src/resources/task/sql.ts @@ -8,6 +8,7 @@ export const Sql = { insertTask: ({ id, name, + taskName, status, created, started, @@ -22,6 +23,7 @@ export const Sql = { }: { id: number; name: string; + taskName: string, status: string; created: string; started: string; @@ -38,6 +40,7 @@ export const Sql = { .insert({ id, name, + taskName, status, created, started, diff --git a/services/api/src/resources/task/task_definition_resolvers.ts b/services/api/src/resources/task/task_definition_resolvers.ts index 471dec54dd..1d67c17a4b 100644 --- a/services/api/src/resources/task/task_definition_resolvers.ts +++ b/services/api/src/resources/task/task_definition_resolvers.ts @@ -19,6 +19,7 @@ import convertDateToMYSQLDateTimeFormat from '../../util/convertDateToMYSQLDateT import * as advancedTaskToolbox from './advancedtasktoolbox'; import { IKeycloakAuthAttributes, KeycloakUnauthorizedError } from '../../util/auth'; import { Environment } from '../../resolvers'; +import { generateTaskName } from '@lagoon/commons/dist/util'; enum AdvancedTaskDefinitionTarget { Group, @@ -532,6 +533,7 @@ export const invokeRegisteredTask = async ( const taskData = await Helpers(sqlClientPool).addTask({ name: task.name, + taskName: generateTaskName(), environment: environment, service: task.service, command: taskCommand, @@ -554,6 +556,7 @@ export const invokeRegisteredTask = async ( const advancedTaskData = await Helpers(sqlClientPool).addAdvancedTask({ name: task.name, + taskName: generateTaskName(), created: undefined, started: undefined, completed: undefined, diff --git 
a/services/api/src/typeDefs.js b/services/api/src/typeDefs.js index 56cb9f35a9..f881c2585e 100644 --- a/services/api/src/typeDefs.js +++ b/services/api/src/typeDefs.js @@ -70,6 +70,12 @@ const typeDefs = gql` ACTIVE SUCCEEDED FAILED + NEW + PENDING + RUNNING + CANCELLED + ERROR + COMPLETE } enum RestoreStatusType { @@ -829,7 +835,7 @@ const typeDefs = gql` monitoringUrls: String deployments(name: String, limit: Int): [Deployment] backups(includeDeleted: Boolean, limit: Int): [Backup] - tasks(id: Int, limit: Int): [Task] + tasks(id: Int, taskName: String, limit: Int): [Task] advancedTasks: [AdvancedTaskDefinition] services: [EnvironmentService] problems(severity: [ProblemSeverityRating], source: [String]): [Problem] @@ -915,6 +921,7 @@ const typeDefs = gql` type Task { id: Int name: String + taskName: String status: String created: String started: String @@ -930,6 +937,7 @@ const typeDefs = gql` type AdvancedTask { id: Int name: String + taskName: String status: String created: String started: String @@ -1067,6 +1075,7 @@ const typeDefs = gql` ): Environment deploymentByRemoteId(id: String): Deployment deploymentsByBulkId(bulkId: String): [Deployment] + taskByTaskName(taskName: String): Task taskByRemoteId(id: String): Task taskById(id: Int): Task """ @@ -1401,6 +1410,7 @@ const typeDefs = gql` input UpdateTaskPatchInput { name: String + taskName: String status: TaskStatusType created: String started: String From 6f62af19061222e43906b2ab0ef8408afffb8c68 Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 12 Apr 2022 08:28:05 +1000 Subject: [PATCH 06/16] chore: remove jobstatus switch as not required --- services/controllerhandler/src/index.ts | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/services/controllerhandler/src/index.ts b/services/controllerhandler/src/index.ts index 81ee15325e..2430d28595 100644 --- a/services/controllerhandler/src/index.ts +++ b/services/controllerhandler/src/index.ts @@ -38,30 +38,14 @@ const updateLagoonTask = async (meta) => { const dateOrNull = R.unless(R.isNil, convertDateFormat) as any; let completedDate = dateOrNull(meta.endTime) as any; - if (meta.jobStatus === 'failed') { + if (meta.jobStatus.toUpperCase() === 'FAILED' || meta.jobStatus.toUpperCase() === 'CANCELLED') { completedDate = dateOrNull(meta.endTime); } - // transform the jobstatus into one the API knows about - let jobStatus = 'active'; - switch (meta.jobStatus) { - case 'pending': - jobStatus = 'active' - break; - case 'running': - jobStatus = 'active' - break; - case 'complete': - jobStatus = 'succeeded' - break; - default: - jobStatus = meta.jobStatus - break; - } // update the actual task now await updateTask(Number(meta.task.id), { remoteId: meta.remoteId, - status: jobStatus.toUpperCase(), + status: meta.jobStatus.toUpperCase(), started: dateOrNull(meta.startTime), completed: completedDate }); From 58e7b12b6ccfd87f2151dfeddc6ee7e1c3d30a1a Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 12 Apr 2022 08:28:35 +1000 Subject: [PATCH 07/16] feat: support taskname slug in the ui --- services/ui/server.js | 2 +- .../ui/src/components/Breadcrumbs/Task.js | 17 ++++++++++ services/ui/src/components/Task/index.js | 31 ++++++++++++++++++- services/ui/src/components/Tasks/index.js | 26 ++++++++++++++-- .../ui/src/components/errors/TaskNotFound.js | 2 +- .../components/errors/TaskNotFound.stories.js | 2 +- services/ui/src/components/link/Task.js | 2 +- .../ui/src/lib/query/EnvironmentWithTask.js | 5 +-- .../ui/src/lib/query/EnvironmentWithTasks.js | 1 + 
services/ui/src/pages/stories/task.stories.js | 2 +- services/ui/src/pages/task.js | 11 +++++-- 11 files changed, 88 insertions(+), 13 deletions(-) create mode 100644 services/ui/src/components/Breadcrumbs/Task.js diff --git a/services/ui/server.js b/services/ui/server.js index ead7c0e707..a7a468f922 100644 --- a/services/ui/server.js +++ b/services/ui/server.js @@ -87,7 +87,7 @@ app (req, res) => { app.render(req, res, '/task', { openshiftProjectName: req.params.environmentSlug, - taskId: req.params.taskSlug + taskName: req.params.taskSlug }); } ); diff --git a/services/ui/src/components/Breadcrumbs/Task.js b/services/ui/src/components/Breadcrumbs/Task.js new file mode 100644 index 0000000000..0b4311c1eb --- /dev/null +++ b/services/ui/src/components/Breadcrumbs/Task.js @@ -0,0 +1,17 @@ +import React from 'react'; +import { getLinkData } from 'components/link/Task'; +import Breadcrumb from 'components/Breadcrumbs/Breadcrumb'; + +const TaskBreadcrumb = ({ taskName, taskSlug, environmentSlug, projectSlug }) => { + const linkData = getLinkData(taskSlug, environmentSlug, projectSlug); + + return ( + + ); +}; + +export default TaskBreadcrumb; diff --git a/services/ui/src/components/Task/index.js b/services/ui/src/components/Task/index.js index 79398a0714..177d52ffd8 100644 --- a/services/ui/src/components/Task/index.js +++ b/services/ui/src/components/Task/index.js @@ -9,7 +9,6 @@ import { bp } from 'lib/variables'; const Task = ({ task }) => (
-

{task.name}

@@ -138,17 +137,47 @@ const Task = ({ task }) => ( } } + &.new { + &::before { + background-image: url('/static/images/in-progress.svg'); + } + } + + &.pending { + &::before { + background-image: url('/static/images/in-progress.svg'); + } + } + + &.running { + &::before { + background-image: url('/static/images/in-progress.svg'); + } + } + &.failed { &::before { background-image: url('/static/images/failed.svg'); } } + &.cancelled { + &::before { + background-image: url('/static/images/failed.svg'); + } + } + &.succeeded { &::before { background-image: url('/static/images/successful.svg'); } } + + &.complete { + &::before { + background-image: url('/static/images/successful.svg'); + } + } } & > div { diff --git a/services/ui/src/components/Tasks/index.js b/services/ui/src/components/Tasks/index.js index 6c053e0a3a..2d00d11bd9 100644 --- a/services/ui/src/components/Tasks/index.js +++ b/services/ui/src/components/Tasks/index.js @@ -18,12 +18,12 @@ const Tasks = ({ tasks, environmentSlug, projectSlug }) => ( {!tasks.length &&
No Tasks
} {tasks.map(task => ( -
+
{task.name}
{moment @@ -148,14 +148,34 @@ const Tasks = ({ tasks, environmentSlug, projectSlug }) => ( background-image: url('/static/images/in-progress.svg'); } + &.new { + background-image: url('/static/images/in-progress.svg'); + } + + &.pending { + background-image: url('/static/images/in-progress.svg'); + } + + &.running { + background-image: url('/static/images/in-progress.svg'); + } + &.failed { background-image: url('/static/images/failed.svg'); } + &.cancelled { + background-image: url('/static/images/failed.svg'); + } + &.succeeded { background-image: url('/static/images/successful.svg'); } + &.complete { + background-image: url('/static/images/successful.svg'); + } + span { @media ${bp.tiny_wide} { display: none; diff --git a/services/ui/src/components/errors/TaskNotFound.js b/services/ui/src/components/errors/TaskNotFound.js index b722bd4a09..f64ce819ed 100644 --- a/services/ui/src/components/errors/TaskNotFound.js +++ b/services/ui/src/components/errors/TaskNotFound.js @@ -4,6 +4,6 @@ import ErrorPage from 'pages/_error'; export default ({ variables }) => ( ); diff --git a/services/ui/src/components/errors/TaskNotFound.stories.js b/services/ui/src/components/errors/TaskNotFound.stories.js index be4c08e639..fc079ba08c 100644 --- a/services/ui/src/components/errors/TaskNotFound.stories.js +++ b/services/ui/src/components/errors/TaskNotFound.stories.js @@ -9,7 +9,7 @@ export default { export const Default = () => ( ); diff --git a/services/ui/src/components/link/Task.js b/services/ui/src/components/link/Task.js index 25de9eb243..6fe9db0b79 100644 --- a/services/ui/src/components/link/Task.js +++ b/services/ui/src/components/link/Task.js @@ -5,7 +5,7 @@ export const getLinkData = (taskSlug, environmentSlug, projectSlug) => ({ pathname: '/task', query: { openshiftProjectName: environmentSlug, - taskId: taskSlug + taskName: taskSlug } }, asPath: `/projects/${projectSlug}/${environmentSlug}/tasks/${taskSlug}` diff --git a/services/ui/src/lib/query/EnvironmentWithTask.js b/services/ui/src/lib/query/EnvironmentWithTask.js index 87659011dd..4d0f09b056 100644 --- a/services/ui/src/lib/query/EnvironmentWithTask.js +++ b/services/ui/src/lib/query/EnvironmentWithTask.js @@ -1,7 +1,7 @@ import gql from 'graphql-tag'; export default gql` - query getEnvironment($openshiftProjectName: String!, $taskId: Int!) { + query getEnvironment($openshiftProjectName: String!, $taskName: String!) 
{ environment: environmentByOpenshiftProjectName( openshiftProjectName: $openshiftProjectName ) { @@ -13,8 +13,9 @@ export default gql` problemsUi factsUi } - tasks(id: $taskId) { + tasks(taskName: $taskName) { name + taskName status created service diff --git a/services/ui/src/lib/query/EnvironmentWithTasks.js b/services/ui/src/lib/query/EnvironmentWithTasks.js index ba462fa74a..0e1220ac39 100644 --- a/services/ui/src/lib/query/EnvironmentWithTasks.js +++ b/services/ui/src/lib/query/EnvironmentWithTasks.js @@ -59,6 +59,7 @@ export default gql` tasks(limit: $limit) { id name + taskName status created service diff --git a/services/ui/src/pages/stories/task.stories.js b/services/ui/src/pages/stories/task.stories.js index e4612610b2..fb0f9809cb 100644 --- a/services/ui/src/pages/stories/task.stories.js +++ b/services/ui/src/pages/stories/task.stories.js @@ -11,7 +11,7 @@ export const Default = () => ( router={{ query: { openshiftProjectName: 'Example', - taskId: 42, + taskName: 'lagoon-task-abcdef', }, }} /> diff --git a/services/ui/src/pages/task.js b/services/ui/src/pages/task.js index 3a4ec08591..fd00a00009 100644 --- a/services/ui/src/pages/task.js +++ b/services/ui/src/pages/task.js @@ -8,6 +8,7 @@ import EnvironmentWithTaskQuery from 'lib/query/EnvironmentWithTask'; import Breadcrumbs from 'components/Breadcrumbs'; import ProjectBreadcrumb from 'components/Breadcrumbs/Project'; import EnvironmentBreadcrumb from 'components/Breadcrumbs/Environment'; +import TaskBreadcrumb from 'components/Breadcrumbs/Task'; import NavTabs from 'components/NavTabs'; import Task from 'components/Task'; import withQueryLoading from 'lib/withQueryLoading'; @@ -24,13 +25,13 @@ import { bp } from 'lib/variables'; export const PageTask = ({ router }) => ( <> - {`${router.query.taskId} | Task`} + {`${router.query.taskName} | Task`} {R.compose( @@ -46,6 +47,12 @@ export const PageTask = ({ router }) => ( environmentSlug={environment.openshiftProjectName} projectSlug={environment.project.name} /> +
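The preceding patches add a generated task name (e.g. lagoon-task-abcdef) to tasks and expose it through the new taskByTaskName query and the UI task slug. As an illustrative sketch only (not part of these patches), such a task could be looked up against a Lagoon GraphQL endpoint roughly as below; the endpoint URL and token are placeholders, and the requested fields are the ones defined in the updated Task type.

    # Placeholder endpoint and token - substitute real Lagoon API details.
    LAGOON_API="https://lagoon-api.example.com/graphql"
    LAGOON_TOKEN="replace-with-a-valid-token"

    # Query the task by its generated task name and print the JSON response.
    curl -sS "$LAGOON_API" \
      -H "Authorization: Bearer $LAGOON_TOKEN" \
      -H "Content-Type: application/json" \
      -d '{"query":"query task($taskName: String!) { taskByTaskName(taskName: $taskName) { id name taskName status created service } }","variables":{"taskName":"lagoon-task-abcdef"}}'
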
From 90154faedca7cbfd44147c7e1200900837858b4a Mon Sep 17 00:00:00 2001 From: Ben Jackson Date: Tue, 12 Apr 2022 17:07:54 +1000 Subject: [PATCH 08/16] fix: remove unneeded sql --- services/api-db/docker-entrypoint-initdb.d/01-migrations.sql | 3 --- 1 file changed, 3 deletions(-) diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index c874ffcd8f..dacf294a74 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -1696,9 +1696,6 @@ CREATE OR REPLACE PROCEDURE t.task_name = CONCAT('lagoon-task-', (SELECT LEFT(UUID(), 6))) WHERE t.task_name IS NULL; -END WHILE; - - END; $$ From aed4df09bbdac2b8ec96442f2be713eb25c9e1cd Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Fri, 22 Apr 2022 09:55:58 +1000 Subject: [PATCH 09/16] fix errors in migrations sql --- services/api-db/docker-entrypoint-initdb.d/01-migrations.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index 6fd00ae9d6..1966a4b5de 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -1603,7 +1603,7 @@ CREATE OR REPLACE PROCEDURE table_name = 'advanced_task_definition_argument' AND column_name = 'display_name' ) THEN - ALTER TABLE `advanced_task_definition` + ALTER TABLE `advanced_task_definition_argument` ADD `display_name` varchar(500) NULL; END IF; END; @@ -1626,7 +1626,7 @@ CREATE OR REPLACE PROCEDURE column_type_argument_type = "enum('ssh-rsa','ssh-ed25519')" ) THEN ALTER TABLE ssh_key - MODIFY type ENUM('ssh-rsa', 'ssh-ed25519','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521'); + MODIFY type ENUM('ssh-rsa','ssh-ed25519','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521'); END IF; END; $$ From 13cee409a73dba2553a00475aa71dc07fc2a6a48 Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Fri, 22 Apr 2022 10:07:40 +1000 Subject: [PATCH 10/16] actually fix the correct column this time --- services/api-db/docker-entrypoint-initdb.d/01-migrations.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index 1966a4b5de..beee9d2be4 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -1626,7 +1626,7 @@ CREATE OR REPLACE PROCEDURE column_type_argument_type = "enum('ssh-rsa','ssh-ed25519')" ) THEN ALTER TABLE ssh_key - MODIFY type ENUM('ssh-rsa','ssh-ed25519','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521'); + MODIFY key_type ENUM('ssh-rsa','ssh-ed25519','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521'); END IF; END; $$ From 45f288bf1be112d572d1ae0b3718b13dc341133c Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Fri, 22 Apr 2022 10:42:52 +1000 Subject: [PATCH 11/16] rework key_type MODIFY string to match --- services/api-db/docker-entrypoint-initdb.d/01-migrations.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index beee9d2be4..8a5a1f42f0 100644 --- 
a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -1626,7 +1626,7 @@ CREATE OR REPLACE PROCEDURE column_type_argument_type = "enum('ssh-rsa','ssh-ed25519')" ) THEN ALTER TABLE ssh_key - MODIFY key_type ENUM('ssh-rsa','ssh-ed25519','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521'); + MODIFY key_type ENUM('ssh-rsa', 'ssh-ed25519','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521') NOT NULL DEFAULT 'ssh-rsa'; END IF; END; $$ From 51fe07d353be1ca8d0a2058f58ab847e9f55ed20 Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Fri, 22 Apr 2022 14:32:23 +1000 Subject: [PATCH 12/16] CALL add_new_task_status_types --- services/api-db/docker-entrypoint-initdb.d/01-migrations.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index 8a5a1f42f0..382600dd63 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -1803,6 +1803,7 @@ CALL change_name_index_for_advanced_task_argument(); CALL add_confirmation_text_to_advanced_task_def(); CALL add_display_name_to_advanced_task_argument(); CALL add_ecdsa_ssh_key_types(); +CALL add_task_name_to_tasks(); CALL add_new_task_status_types(); CALL update_active_succeeded_tasks(); CALL update_missing_tasknames(); From 52f67d787f7df1ef2aa533559e60d97e20fb93af Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Wed, 27 Apr 2022 12:41:54 +1000 Subject: [PATCH 13/16] cluster_permissions has no allowed_actions --- services/api/src/resources/group/opendistroSecurity.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/services/api/src/resources/group/opendistroSecurity.ts b/services/api/src/resources/group/opendistroSecurity.ts index f218b96ea0..12e0e0b894 100644 --- a/services/api/src/resources/group/opendistroSecurity.ts +++ b/services/api/src/resources/group/opendistroSecurity.ts @@ -43,9 +43,7 @@ export const OpendistroSecurityOperations = ( const groupProjectPermissions = { body: { cluster_permissions: [ - { - allowed_actions: ['cluster:admin/opendistro/reports/menu/download'] - } + 'cluster:admin/opendistro/reports/menu/download' ], index_permissions: [ { From a51fa3980e50e321aa11bcc67855f51d84ce15a8 Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Wed, 27 Apr 2022 19:49:48 +1000 Subject: [PATCH 14/16] add roleMapping to tenant creation step --- services/api/src/resources/group/opendistroSecurity.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/services/api/src/resources/group/opendistroSecurity.ts b/services/api/src/resources/group/opendistroSecurity.ts index 12e0e0b894..3e269137ab 100644 --- a/services/api/src/resources/group/opendistroSecurity.ts +++ b/services/api/src/resources/group/opendistroSecurity.ts @@ -94,6 +94,14 @@ export const OpendistroSecurityOperations = ( logger.debug(`${groupName}: Created Tenant "${tenantName}"`); } catch (err) { logger.error(`Opendistro-Security create tenant error: ${err}`); + }; + + try { + // Create a new RoleMapping for this Group + await opendistroSecurityClient.put(`rolesmapping/${tenantName}`, { body: { backend_roles: [`${tenantName}`] } }); + logger.debug(`${groupName}: Created RoleMapping "${tenantName}"`); + } catch (err) { + logger.error(`Opendistro-Security create rolemapping error: ${err}`); } } From 7515d0bc7d5f67019aef2039e8b9fcd4a2136bf8 Mon Sep 17 00:00:00 2001 
From: Ben Jackson Date: Thu, 28 Apr 2022 14:41:57 +1000 Subject: [PATCH 15/16] chore: fix up counter check for dbaas provisioning --- .../scripts/exec-kubectl-mariadb-dbaas.sh | 2 +- .../scripts/exec-kubectl-postgres-dbaas.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mariadb-dbaas.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mariadb-dbaas.sh index 30dae3d91d..aa2a20df8f 100644 --- a/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mariadb-dbaas.sh +++ b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mariadb-dbaas.sh @@ -8,7 +8,7 @@ OPERATOR_TIMEOUT=180 until kubectl -n ${NAMESPACE} get mariadbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database do if [ $OPERATOR_COUNTER -lt $OPERATOR_TIMEOUT ]; then - let SERVICE_BROKER_COUNTER=SERVICE_BROKER_COUNTER+1 + let OPERATOR_COUNTER=OPERATOR_COUNTER+1 echo "Service for ${SERVICE_NAME} not available yet, waiting for 5 secs" sleep 5 else diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh index ea49292be0..283528892b 100644 --- a/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh +++ b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh @@ -8,7 +8,7 @@ OPERATOR_TIMEOUT=180 until kubectl -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database do if [ $OPERATOR_COUNTER -lt $OPERATOR_TIMEOUT ]; then - let SERVICE_BROKER_COUNTER=SERVICE_BROKER_COUNTER+1 + let OPERATOR_COUNTER=OPERATOR_COUNTER+1 echo "Service for ${SERVICE_NAME} not available yet, waiting for 5 secs" sleep 5 else From 47433c12d954281a1ea3d336b35daea519c7ebf6 Mon Sep 17 00:00:00 2001 From: Toby Bellwood Date: Mon, 2 May 2022 10:36:33 +1000 Subject: [PATCH 16/16] more explicit controller robot actions --- .../kubectl-build-deploy-dind/build-deploy.sh | 25 +++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/images/kubectl-build-deploy-dind/build-deploy.sh b/images/kubectl-build-deploy-dind/build-deploy.sh index 292924c705..e3225c3b24 100755 --- a/images/kubectl-build-deploy-dind/build-deploy.sh +++ b/images/kubectl-build-deploy-dind/build-deploy.sh @@ -58,14 +58,25 @@ kubectl config set-cluster kubernetes.default.svc --server=https://kubernetes.de kubectl config set-context default/lagoon/kubernetes.default.svc --user=lagoon/kubernetes.default.svc --namespace="${NAMESPACE}" --cluster=kubernetes.default.svc kubectl config use-context default/lagoon/kubernetes.default.svc -if [ ! -z ${INTERNAL_REGISTRY_URL} ] && [ ! -z ${INTERNAL_REGISTRY_USERNAME} ] && [ ! -z ${INTERNAL_REGISTRY_PASSWORD} ] ; then - echo "docker login -u '${INTERNAL_REGISTRY_USERNAME}' -p '${INTERNAL_REGISTRY_PASSWORD}' ${INTERNAL_REGISTRY_URL}" | /bin/bash - # create lagoon-internal-registry-secret if it does not exist yet - if ! kubectl -n ${NAMESPACE} get secret lagoon-internal-registry-secret &> /dev/null; then - kubectl create secret docker-registry lagoon-internal-registry-secret --docker-server=${INTERNAL_REGISTRY_URL} --docker-username=${INTERNAL_REGISTRY_USERNAME} --docker-password=${INTERNAL_REGISTRY_PASSWORD} --dry-run -o yaml | kubectl apply -f - +if [ ! -z ${INTERNAL_REGISTRY_URL} ] ; then + echo "Creating secret for internal registry access" + if [ ! -z ${INTERNAL_REGISTRY_USERNAME} ] && [ ! 
-z ${INTERNAL_REGISTRY_PASSWORD} ] ; then + echo "docker login -u '${INTERNAL_REGISTRY_USERNAME}' -p '${INTERNAL_REGISTRY_PASSWORD}' ${INTERNAL_REGISTRY_URL}" | /bin/bash + # create lagoon-internal-registry-secret if it does not exist yet + if ! kubectl -n ${NAMESPACE} get secret lagoon-internal-registry-secret &> /dev/null; then + kubectl create secret docker-registry lagoon-internal-registry-secret --docker-server=${INTERNAL_REGISTRY_URL} --docker-username=${INTERNAL_REGISTRY_USERNAME} --docker-password=${INTERNAL_REGISTRY_PASSWORD} --dry-run -o yaml | kubectl apply -f - + fi + REGISTRY_SECRETS+=("lagoon-internal-registry-secret") + REGISTRY=$INTERNAL_REGISTRY_URL # This will handle pointing Lagoon at the correct registry for non local builds + echo "Set internal registry secrets for token ${INTERNAL_REGISTRY_USERNAME} in ${REGISTRY}" + else + if [ ! $INTERNAL_REGISTRY_USERNAME ]; then + echo "No token created for registry ${INTERNAL_REGISTRY_URL}"; exit 1; + fi + if [ ! $INTERNAL_REGISTRY_PASSWORD ]; then + echo "No password retrieved for token ${INTERNAL_REGISTRY_USERNAME} in registry ${INTERNAL_REGISTRY_URL}"; exit 1; + fi fi - REGISTRY_SECRETS+=("lagoon-internal-registry-secret") - REGISTRY=$INTERNAL_REGISTRY_URL # This will handle pointing Lagoon at the correct registry for non local builds fi ##############################################
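
The headline feature of this series is the configurable backup retention and schedule for development and pull request environments, driven by LAGOON_BACKUP_DEV_RETENTION / LAGOON_BACKUP_PR_RETENTION ("H:D:W:M") and LAGOON_BACKUP_DEV_SCHEDULE / LAGOON_BACKUP_PR_SCHEDULE. A minimal standalone sketch of how exec-backup-generation.sh splits the retention value, using the example from the script's header comment (the sample value is illustrative only):

    # Example override: keep 0 hourly, 7 daily, 6 weekly and 1 monthly backups.
    BACKUP_RETENTION="0:7:6:1"

    # Split on ":" exactly as the build script does.
    IFS=':' read -ra BACKUP_RETENTION_SPLIT <<< "$BACKUP_RETENTION"
    HOURLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[0]}
    DAILY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[1]}
    WEEKLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[2]}
    MONTHLY_BACKUP_DEFAULT_RETENTION=${BACKUP_RETENTION_SPLIT[3]}

    # Prints: hourly=0 daily=7 weekly=6 monthly=1
    echo "hourly=${HOURLY_BACKUP_DEFAULT_RETENTION} daily=${DAILY_BACKUP_DEFAULT_RETENTION} weekly=${WEEKLY_BACKUP_DEFAULT_RETENTION} monthly=${MONTHLY_BACKUP_DEFAULT_RETENTION}"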