diff --git a/instance-applications/120-ibm-db2u-database/binary_files/db2shc b/instance-applications/120-ibm-db2u-database/binary_files/db2shc new file mode 100755 index 000000000..8d829b259 Binary files /dev/null and b/instance-applications/120-ibm-db2u-database/binary_files/db2shc differ diff --git a/instance-applications/120-ibm-db2u-database/files/.Prompt b/instance-applications/120-ibm-db2u-database/files/.Prompt new file mode 100755 index 000000000..db4b2912d --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/.Prompt @@ -0,0 +1,3 @@ +#!/bin/bash +CUST=`db2 list applications for db bludb show detail | awk '{print $1}' | grep -vi db2inst | grep -vi ctginst | tail -2 | head -1` +PS1='[${USER}@${HOSTNAME} - ${CUST} ${PWD##*/}]\$ ' diff --git a/instance-applications/120-ibm-db2u-database/files/CheckCOS.sh b/instance-applications/120-ibm-db2u-database/files/CheckCOS.sh new file mode 100755 index 000000000..71305618e --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/CheckCOS.sh @@ -0,0 +1,17 @@ +. /mnt/backup/bin/.PROPS + +echo " " +echo " ##### BUCKET = ${CONTAINER} #####" +echo " " + + +. 
/mnt/backup/bin/.PROPS + +DB2V=`db2level | grep Inform | awk '{print $5}' | sed 's/",//'` +if [ ${DB2V} = "v11.5.7.0" ] + then + + db2RemStgManager S3 list server=${SERVER} auth1=${PARM1} auth2=${PARM2} container=${CONTAINER} +else + db2RemStgManager ALIAS LIST source=DB2REMOTE://AWSCOS// +fi diff --git a/instance-applications/120-ibm-db2u-database/files/CopyDBScripts.sh b/instance-applications/120-ibm-db2u-database/files/CopyDBScripts.sh new file mode 100644 index 000000000..0395ae254 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/CopyDBScripts.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# Finding the Instance owner +INSTOWNER=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | awk -F ',' '{print $4}' ` + +# Finding Instnace owner Group +GRPID=`cat /etc/passwd | grep ${INSTOWNER} | cut -d: -f4` +INSTGROUP=`cat /etc/group | grep ${GRPID} | cut -d: -f1` + +# Find the home directory +INSTHOME=` cat /etc/passwd | grep ${INSTOWNER} | cut -d: -f6` + +# Resolving INSTOWNER's executables path (sqllib): +DBPATH=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${INSTOWNER}" | awk -F ',' '{print $5}' ` + +# Source the db2profile for the root user to be able to issue several db2 commands locally: +SOURCEPATH="$DBPATH/db2profile" +. $SOURCEPATH + +cd /tmp/db2-scripts/ + +echo -e "\nCopying the files to bin directory under Instance Home . . . 
" +cp -rp .Prompt ${INSTHOME}/ +cp -rp CheckCOS.sh ${INSTHOME}/bin/ +cp -rp DB2_Backup.sh ${INSTHOME}/bin/ +cp -rp Run_Backup.sh ${INSTHOME}/bin/ +cp -rp RUNEXPORT.sh ${INSTHOME}/bin/ +cp -rp Explain.ddl ${INSTHOME}/bin/ +cp -rp RUN_OnDemandFULL_BKP.sh ${INSTHOME}/bin/ +cp -rp runstats_rebind.sh ${INSTHOME}/bin/ +cp -rp CreateRoles.sh ${INSTHOME}/bin/ +cp -rp grant_check.sh ${INSTHOME}/bin/ +cp -rp reorgTablesIndexesInplace2.sh ${INSTHOME}/bin/ +cp -rp extract_authorization.sh ${INSTHOME}/bin +cp -rp HADRMON.sh ${INSTHOME}/bin + +echo -e "\nCopying the file to bin/ITCS104 directory under Instance Home . . ." +cp -rp FixInvalidObjects.sh ${INSTHOME}/bin/ITCS104/ + +echo -e "\nCopying files to /mnt/backup/bin directory . . ."; +sudo cp -rp cronRunBKP.sh /mnt/backup/bin/ +sudo chown db2uadm:wheel /mnt/backup/bin/cronRunBKP.sh + +echo -e "\nCopying files to Managed directory under Instance Home . . ."; +cp -rp Set_DB_COS_Storage.sh ${INSTHOME}/Managed/ +cp -rp Reg-Large_TBSP.sh ${INSTHOME}/Managed/ +cp PostBackFlow.sh ${INSTHOME}/Managed +cp OwnerCheck.txt ${INSTHOME}/Managed + +echo -e "\nCopying files to maintenance directory under Instance Home . . . "; +cp -rp reorgTablesIndexesInplace2_maintenance.sh ${INSTHOME}/maintenance/reorgTablesIndexesInplace2.sh +if [ ! -d ${INSTHOME}/maintenance/logs ] ; then + mkdir -p ${INSTHOME}/maintenance/logs + echo "${DATETIME}:Creating directory ${INSTHOME}/maintenance/logs" + if [ $? 
!= "0" ] ; then + echo "${DATETIME}: ERROR: Unable to create directory ${INSTHOME}/maintenance/logs" + exit 1 + fi +fi + +sudo chown -R ${INSTOWNER}:${INSTGROUP} ${INSTHOME}/bin +sudo chown -R ${INSTOWNER}:${INSTGROUP} ${INSTHOME}/maintenance +sudo chown -R ${INSTOWNER}:${INSTGROUP} ${INSTHOME}/Managed \ No newline at end of file diff --git a/instance-applications/120-ibm-db2u-database/files/CreateRoles.sh b/instance-applications/120-ibm-db2u-database/files/CreateRoles.sh new file mode 100755 index 000000000..21e2ef989 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/CreateRoles.sh @@ -0,0 +1,125 @@ +#!/bin/bash +## CreateRoles.sh +########## ${SCHEMANAME} ######## +################################################################################ +# +# Usage: ./CreateRoles.sh +# +# +################################################################################ + +##Possibly need to grant the following on the non flex databases +## db2 "grant execute on package nullid.SQLC2K26 to role maximo_read" +## db2 "GRANT USAGE ON WORKLOAD SYSDEFAULTUSERWORKLOAD role maximo_read" +## db2 "grant execute on package nullid.SYSSH200 to role maximo_read" +## db2 grant select on syscat.schemata to role maximo_read +## db2 grant select on syscat.tables to role maximo_read +## db2 grant select on syscat.indexes to role maximo_read +## db2 grant select on syscat.columns to role maximo_read + +#set -x +db2 connect to bludb + +SCHEMANAME=MAXIMO + +DATETIME=`date +%Y%m%d_%H%M%S`; + +ROLES=`db2 -x "select char(ROLENAME,30) as ROLENAME from syscat.roles"` +ROLE="${SCHEMANAME}_read" +echo "" > temp +if ! 
grep -iqw "${ROLE}" <<< "${ROLES}" ; then + echo "create role ${SCHEMANAME}_read;" > temp +fi +USER=${SCHEMANAME}_READ +WRITE=${SCHEMANAME}_WRITE + +db2 "select +'GRANT SELECT ON TABLE '|| +RTRIM(TABSCHEMA) || '.\"' || RTRIM(tabname)||'\" TO ROLE ${USER};' +from +syscat.tables +where tabschema = '${SCHEMANAME}'" >> temp + + +db2 "select +'GRANT SELECT ON table '|| +RTRIM(viewSCHEMA) || '.' || RTRIM(viewname)||' TO ROLE ${USER};' +from +syscat.views +where viewschema = '${SCHEMANAME}'" >> temp +echo "grant selectin on schema MAXIMO to role MAXIMO_READ;" >> temp + + + +cat temp | grep -i ${SCHEMANAME}_read > ${USER}.sql +rm temp +echo "GRANT CONNECT ON DATABASE TO ROLE ${USER};" >>${USER}.sql +#echo "GRANT USE OF TABLESPACE MAXDATA TO ROLE ${USER};" >> ${USER}.sql +db2 -tvf ${USER}.sql > ${USER}_${DATETIME}.out + +echo "" > temp +ROLE="${SCHEMANAME}_write" +if ! grep -iqw "${ROLE}" <<< "${ROLES}" ; then + echo "create role ${SCHEMANAME}_write;" > temp +fi +echo "grant updatein on schema MAXIMO to role MAXIMO_WRITE;" >> temp +echo "grant deletein on schema MAXIMO to role MAXIMO_WRITE;" >> temp +echo "grant insertin on schema MAXIMO to role MAXIMO_WRITE;" >> temp +echo "grant selectin on schema MAXIMO to role MAXIMO_WRITE;" >> temp + +db2 "select +'GRANT SELECT, insert, update, delete ON TABLE '|| +RTRIM(TABSCHEMA) || '.\"' || RTRIM(tabname)||'\" TO ROLE ${WRITE};' +from +syscat.tables +where tabschema = '${SCHEMANAME}'" >> temp + + + + +cat temp | grep -i ${SCHEMANAME}_write > ${WRITE}.sql +rm temp +echo "GRANT CONNECT ON DATABASE TO ROLE ${USER};" >>${WRITE}.sql +#echo "GRANT USE OF TABLESPACE MAXDATA TO ROLE ${WRITE};" >> ${WRITE}.sql + +echo "" > temp +ROLE="${SCHEMANAME}_SEQ" +if ! 
grep -iqw "${ROLE}" <<< "${ROLES}" ; then + echo "create role ${SCHEMANAME}_SEQ;" > temp +fi +USER=${SCHEMANAME}_SEQ + + +db2 "select +'GRANT USAGE ON SEQUENCE '|| +RTRIM(SEQSCHEMA) || '.\"' || RTRIM(SEQNAME)||'\" TO ROLE ${USER};' +from syscat.sequences where seqschema = '${SCHEMANAME}'" >> temp + +cat temp | grep -i ${USER} > ${USER}.sql +rm temp +echo "GRANT CONNECT ON DATABASE TO ROLE ${USER};" >>${USER}.sql + + +db2 -tvf ${USER}.sql > ${USER}_${DATETIME}.out +db2 -tvf ${WRITE}.sql > ${WRITE}_${DATETIME}.out + + +db2 "grant selectin on schema MAXIMO to role MAXIMO_READ" +db2 "grant updatein on schema MAXIMO to role MAXIMO_WRITE" +db2 "grant deletein on schema MAXIMO to role MAXIMO_WRITE" +db2 "grant insertin on schema MAXIMO to role MAXIMO_WRITE" +db2 "grant selectin on schema MAXIMO to role MAXIMO_WRITE" + +echo "Creating the EXPLAIN ROLE" +ROLE="EXPLAIN" +if grep -iqw "${ROLE}" <<< "${ROLES}" ; then + + echo "${ROLE} is already present in the database ${DBNAME}"; + exit 1; +else + echo "${ROLE} is Not FOUND, proceeding with creating the role " + db2 -tvf Explain.ddl +fi + + +db2 terminate diff --git a/instance-applications/120-ibm-db2u-database/files/DB2_Backup.sh b/instance-applications/120-ibm-db2u-database/files/DB2_Backup.sh new file mode 100755 index 000000000..d8f21436c --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/DB2_Backup.sh @@ -0,0 +1,304 @@ +#!/bin/ksh +set -x + +######################################################### +# DB2_Backup.sh +# +# Things to do: +# Recovery history retention (days) (REC_HIS_RETENTN) = 0 >>> Need to set to 15 days +# +# +# The cron job on the cluster will supply the needed parameters for this script +# If an on demand backup (Full) is required, the DB2_Backup.sh script can be called with the following parameters +# +# Sample parameters +# ./DB2_Backup.sh <# of backups to keep on file system> +# ./DB2_Backup.sh ctginst1 BLUDB 15 full 2>>.BackupLOG.stderr > .BackupLOG.out +# ./DB2_Backup.sh ctginst1 
BLUDB 15 inc 2>>.BackupLOG.stderr > .BackupLOG.out +# +######################################################### + + +. /mnt/backup/bin/.PROPS + +#### COSBACKUPBUCKET=masms-pp-1-cos-backup-pseg-test-pr-wdc +#### For testing +TESTMSG="######## TESTING ###########" +echo ${COSBACKUPBUCKET} +COSBACKUPBUCKET=${CONTAINER} +#### TESTING URL + +Server=`hostname` +instance=`whoami` +FULLIMAGE= +DATETIME=`date +%Y-%m-%d_%H%M%S`; +BACKUP_BASE=/mnt/backup +BACKUP_LOGS=${BACKUP_BASE}/${DB2INSTANCE} +BACKUP_PATH=DB2REMOTE://AWSCOS/${COSBACKUPBUCKET}/backups-manage/${HOSTNAME} +ARCBKP_PATH=${BACKUP_PATH}/${DATETIME} +CLEAN_LOG=${BACKUP_PATH}/.cleanup.log +instance_home=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${instance}" | awk -F ',' '{print $5}'| cut -d/ -f 1,2,3,4,5` +IP=`/sbin/ifconfig | grep "inet" | grep broadcast | awk '{print $2}'` +BACK_LOG=$instance_home/bin/.$2_BackupLOG.out +Maillog="/tmp/.backup_maillog" + + +SLACK_NOTIFY() +{ + des="$instance - Backup - $Server ${database} -- DATABASE Backup issues" + echo "${CUSTNAME} - $instance - Backup - $Server $IP ${database} DATABASE Backup issues" > .Maillive.log + echo "############################" >> .Maillive.log + cat ${BACK_LOG} >> .Maillive.log + longdes=`cat .Maillive.log | sed 's/"//g' | sed "s/'//g"` + slackdes=" BACKUP FAILED for ${Server} - ${CONTAINER}} ...Please investigate " + +### Send Failure notification to a slack channel ## +cat << ! >.curl_$database.sh + curl -X POST -H 'Content-type: application/json' --data '{"text":"$slackdes"}' ${SLACKURL} +! +if [[ -n "${SLACKURL}" ]]; then + /bin/bash .curl_$database.sh > .curl_$database.out 2>&1 +fi + + ##### Create ICD Incident #### + ####### If Backup fails ### + des="${CUSTNAME} - ${instance} - Backup - ${HOSTNAME} ${database} - MASMS -- Backup Failed" + echo "############################" >> .Maillive.log + longdes=`cat .Maillive.log | sed 's/"//g' | sed "s/'//g"` + longdes=`echo "
 ${longdes} 
"` + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +cat << ! >.curl_${database}_ICD.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"CTGINST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"{servicedesk-pdb-sjc03-2.cds.mro.com:0:50}", + "hstype":"BACKUP" + }' +! +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash .curl_${database}_ICD.sh > .curl_${database}_ICD.out 2>&1 +fi + +} + + + +if [ ! -f "$instance_home/sqllib/db2profile" ] +then + echo "ERROR - $instance_home/sqllib/db2profile not found" + EXIT_STATUS=1 +else + . $instance_home/sqllib/db2profile +fi + + +if [ -f $Maillog ] +then + rm $Maillog +fi + +echo "COS bucket = ${COSBACKUPBUCKET} " > $BACK_LOG +echo "BACKUP Start time : ${DATETIME}" >> $BACK_LOG +echo " " >> $BACK_LOG +echo ${HOSTNAME} >> $BACK_LOG +echo " " >> $BACK_LOG +echo " " >> $BACK_LOG + +if [[ $# -eq 4 ]] +then + typeset -l instance=$1 database=$2 + typeset -u INSTANCE=$1 DATABASE=$2 + typeset -i num_backups_to_keep=$3 + typeset -l BKUP_TYPE=$4 + + ##### until the db is bounced to pickup the TRACKMOD parm..We have to hardcode a FULL backup + #####BKUP_TYPE=full + ### BKUP_TYPE = full or inc #### +else + print `tput smso` "Usage! 
$0 instance database number_of_backups_to_keep" `tput rmso` + exit 1 +fi + +### Check for the existance of /home/ctginst1/sqllib/db2dump/libdb2compr.so...if it exists, delete it +COMPRESS_LOC=$instance_home/sqllib/db2dump/libdb2compr.so +if [[ -f ${COMPRESS_LOC} ]] +then + rm ${COMPRESS_LOC} +fi + +### Check to see if the database is Running +ps -ef | grep db2sys | grep -v grep > /dev/null 2>&1 +if [ $? -eq 1 ]; then + echo "Database is not active " + echo "$Server,Database Not Active,BACKUP Not Run" > $instance_home/bin/LASTbkupRUN + + ### Send error alert + SLACK_NOTIFY + exit +fi + +### Check to see if the database is HADR +db2pd -hadr -db ${database} | awk -F= '/HADR_ROLE/ {print $2}' | grep STANDBY > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "This is a HADR database" + echo "Backup successful. The timestamp for this backup image is : HADR_DB" + echo "$Server,HADR,NO BACKUPS" > $instance_home/bin/LASTbkupRUN + exit 0 +fi + +### Create the backup directory if it doesnt already exist +if [[ -d $BACKUP_LOGS ]] +then : + else mkdir -m 755 ${BACKUP_LOGS} +fi + +echo "BACKUP Start time : ${DATETIME}" +echo " " +echo $Server +echo " " +echo " " + +db2 -v archive log for db $database | tee -a $BACK_LOG +sleep 30 + +if [[ $num_backups_to_keep -gt 0 ]] +then + ### Backup database + if [ ${BKUP_TYPE} = 'full' ] ; then + db2 -v backup db $database online to $BACKUP_PATH compress UTIL_IMPACT_PRIORITY 50 include logs without prompting | tee -a $BACK_LOG + else + db2 -v backup db $database online INCREMENTAL DELTA to $BACKUP_PATH compress UTIL_IMPACT_PRIORITY 50 include logs without prompting | tee -a $BACK_LOG + fi + grep -Fq "Backup successful." $BACK_LOG + if [ $? 
= 0 ]; then + Backup_timestamp=`grep timestamp $BACK_LOG | cut -d: -f2` + + ### Need to find all the files associate with the backup ## + ### + ### + ### Need to change this to the db2RemStgManager command to get a list of all backup images just created + ### + ### + + # fi + else + SLACK_NOTIFY + #exit + fi +fi + +######## Copy keystore to COS +set -x +SOURCE1=/mnt/blumeta0/db2/keystore/keystore.p12 +SOURCE2=/mnt/blumeta0/db2/keystore/keystore.sth +TARGET1=backups-manage/${HOSTNAME}/KEYSTORE/keystore.p12 +TARGET2=backups-manage/${HOSTNAME}/KEYSTORE/keystore.sth + +DB2V=`db2level | grep Inform | awk '{print $5}' | sed 's/",//'` +if [ ${DB2V} = "v11.5.7.0" ] +then + db2RemStgManager S3 put server=${SERVER} auth1=${PARM1} auth2=${PARM2} container=${CONTAINER} source=${SOURCE1} target=${TARGET1} + db2RemStgManager S3 put server=${SERVER} auth1=${PARM1} auth2=${PARM2} container=${CONTAINER} source=${SOURCE2} target=${TARGET2} +else + db2RemStgManager ALIAS PUT source=${SOURCE1} target=DB2REMOTE://AWSCOS//${TARGET1} + db2RemStgManager ALIAS PUT source=${SOURCE2} target=DB2REMOTE://AWSCOS//${TARGET2} +fi + +# exclude files that arent backups, e.g. backhist listing. +typeset -i no_backups=`./CheckCOS.sh | grep -i ${database}| cut -d/ -f3| grep 001 |wc -l` +echo " number of backups $no_backups" +### Prune the history file, if and only if the last backup succeeded. +### Remove archive transaction logs for expired backups, if there are a requisite number of successful backups. +### Remove expired backups in step. 
+ +if [[ $num_backups_to_keep -gt 0 && $no_backups -ge $num_backups_to_keep ]] +then + db2 -v connect to $database | tee -a $BACK_LOG + + timestmp=$(db2 -x "select coalesce(max(start), 17890713235959) from \ + (select bigint(start_time) - 1 as start, \ + row_number() over(order by start_time desc) as backup \ + from sysibmadm.db_history \ + where operation = 'B' \ + and objecttype = 'D' \ + and devicetype = 'D' \ + and sqlcode is null \ + and sqlwarn is null \ + ) as zzz \ + where backup = $num_backups_to_keep" ) + + db2 -v prune history $timestmp WITH FORCE OPTION and delete | tee -a $BACK_LOG + + ### loop until the recovery history file is stable and then report it + RC=999 + typeset -i no_loops=0 + while [[ $RC -gt 0 ]] + do + db2 -v list history backup since $timestmp for $database > ${BACKUP_LOGS}/backhist + RC=$? + print RC for list history was $RC + cat ${BACKUP_LOGS}/backhist >> $BACK_LOG + if [[ $no_loops -gt 720 ]] + then + ### then youve been waiting an hour + print $0 "Im tired of waiting for the recovery history file to stabilise. 
Im giving up" + break + else + sleep 5 + let no_loops=no_loops+1 + fi + done + echo "Content of backhist file:" + cat ${BACKUP_LOGS}/backhist + db2 -v commit | tee -a $BACK_LOG + + db2 -v connect reset | tee -a $BACK_LOG + db2 -v terminate | tee -a $BACK_LOG +fi + +sleep 30 + +if [[ $num_backups_to_keep -eq 0 ]] +then + db2 -v connect to $database | tee -a $BACK_LOG + db2 -x "select location \ + from sysibmadm.db_history \ + where operation = 'X' \ + and operationtype = '1' " > $BACKUP_LOGS/archivelog.zaplist + for log in `cat $BACKUP_LOGS/archivelog.zaplist` + do + printf "`date +'%F %T'`\t%-110s\t%12d k\n" "${log}" "`du -sk ${log} | awk '{print $1}'`" >> ${CLEAN_LOG} + done + ### prune history in step + timestmp=$(db2 -x "select max(start_time) from sysibmadm.db_history where operation = 'X' and operationtype = '1'") + db2 -v prune history $timestmp WITH FORCE OPTION and delete | tee -a $BACK_LOG + wait + db2 -v commit | tee -a $BACK_LOG + db2 -v connect reset | tee -a $BACK_LOG + db2 -v terminate | tee -a $BACK_LOG +fi + +DATETIME=`date +%Y-%m-%d_%H%M%S`; +echo "BACKUP End time : ${DATETIME}" >> $BACK_LOG + +if [[ ${BKUP_STATUS} -gt 0 ]] +then + ### Send error alert + SLACK_NOTIFY +fi + +### Copy the current backup log to the Backup log history file +cat $BACK_LOG >> ${BACKUP_LOGS}/.BackupLOG diff --git a/instance-applications/120-ibm-db2u-database/files/Explain.ddl b/instance-applications/120-ibm-db2u-database/files/Explain.ddl new file mode 100755 index 000000000..8ec322c31 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/Explain.ddl @@ -0,0 +1,17 @@ +-- #DBNAME=`db2 list db directory | grep alias | awk '{ print $4 }' | paste -s -d ' '` +-- DBNAME=BLUDB; +connect to BLUDB; +call SYSPROC.SYSINSTALLOBJECTS('EXPLAIN', 'D', CAST (NULL AS VARCHAR(128)),'SYSTOOLS' ); +call SYSPROC.SYSINSTALLOBJECTS('EXPLAIN', 'C', CAST (NULL AS VARCHAR(128)),'SYSTOOLS' ); +create role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_ARGUMENT to role EXPLAIN; +grant all 
on SYSTOOLS.EXPLAIN_DIAGNOSTIC to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_DIAGNOSTIC_DATA to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_INSTANCE to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_OBJECT to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_OPERATOR to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_PREDICATE to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_STATEMENT to role EXPLAIN; +grant all on SYSTOOLS.EXPLAIN_STREAM to role EXPLAIN; +connect reset; +terminate; diff --git a/instance-applications/120-ibm-db2u-database/files/FixInvalidObjects.sh b/instance-applications/120-ibm-db2u-database/files/FixInvalidObjects.sh new file mode 100755 index 000000000..e115cb98d --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/FixInvalidObjects.sh @@ -0,0 +1,25 @@ +#!/bin/bash +################################################################################ +# To be run as the instance owner of the database +# +# +################################################################################ + +DATETIME=`date +%Y%m%d_%H%M%S`; +DBNAME=BLUDB + +db2 connect to ${DBNAME} + +echo " all invalid objects" +db2 "SELECT substr(objectschema,1,20) objectschema, substr(objectname,1,30) objectname, routinename, objecttype FROM syscat.invalidobjects" + +db2 "select 'CALL SYSPROC.ADMIN_REVALIDATE_DB_OBJECTS(NULL, ''' || OBJECTSCHEMA ||''', NULL);' from SYSCAT.INVALIDOBJECTS group by objectschema" > FixObjects.sql +cat FixObjects.sql | grep CALL > TEMP +mv TEMP FixObjects.sql +db2 -tvf FixObjects.sql + +echo "checking again for invalid objects (query should return zero resluts" + +db2 "SELECT substr(objectschema,1,20) objectschema, substr(objectname,1,30) objectname, routinename, objecttype FROM syscat.invalidobjects" + +db2 connect reset diff --git a/instance-applications/120-ibm-db2u-database/files/HADRMON.sh b/instance-applications/120-ibm-db2u-database/files/HADRMON.sh new file mode 100755 index 000000000..1fc581a93 --- /dev/null +++ 
b/instance-applications/120-ibm-db2u-database/files/HADRMON.sh @@ -0,0 +1,640 @@ +#!/bin/bash +#uthor: Fu Le Qing (Roking) +# Email: leqingfu@cn.ibm.com +# Date: 07-24-2019 +# +# Description: This script detects the the HADR state, and then +# create a task in service desk once the error is caught. +# +# ******** THIS NEEDS TO BE RUN AS INSTANCE OWNER. ************** +# +# Revision history: +# 07-24-2019 Fu Le Qing (Roking) +# Original version +# *************************************************************************** +# 11-06-2019 Fu Le Qing (Roking) +# Add running information for audit +# 05-08-2020 Fu Le Qing (Roking) +# change to monitor the specified database +# 07-07-2021 Fu Le Qing (Roking) +# sent to servicedesk through RSLC API +# *************************************************************************** +# run as below: +# ./HADRMON.sh database_name +# *************************************************************************** +#set -x +CUSTNAME=SMRT_SDB +instance="db2inst1" +. /mnt/backup/bin/.PROPS + +if [ ! -f "${HOME}/sqllib/db2profile" ] +then + echo "ERROR - ${HOME}/sqllib/db2profile not found" + exit +else + . ${HOME}/sqllib/db2profile +fi + +if [[ $# != 1 ]];then + echo "Usage: command database_name" + exit +fi + +HADRMON="/tmp/.hadrmon" +Maillog="/tmp/.Maillog" + +For_audit="${HOME}/bin/LOGS/.HADRcheck.out" + +DATETIME=`date +%Y-%m-%d_%H%M%S` + + +SLACK_NOTIFY() +{ + des="$instance - HADR sync issues $Server ${database} -- DATABASE " + echo "${CUSTNAME} - $instance - HADR sync issues - $HOSTNAME $IP ${database} DATABASE" > .Maillive.log + echo "############################" >> .Maillive.log + #echo "${TESTMSG}" >> .Maillive.log + echo "${longdes} " >> .Maillive.log + longdes=`cat .Maillive.log | sed 's/"//g' | sed "s/'//g"` + +### Send Failure notification to a slack channel ## +cat << ! >.curl_$database.sh +curl -X POST -H 'Content-type: application/json' --data '{"text":"$longdes"}' $SLACKURL +! 
+ if [[ -n "${SLACKURL}" ]]; then +/bin/bash .curl_$database.sh > .curl_$database.out 2>&1 + fi +} + + +if [ -f $HADRMON ] +then + rm $HADRMON +fi + +if [ -f $Maillog ] +then + rm $Maillog +fi + +if [[ -f /mnt/backup/bin/.PROPS ]] +then + Server=`cat /mnt/backup/bin/.PROPS | grep CONTAINER | cut -d= -f2` +else + Server=`hostname` +fi +if [ ! -n "$Server" ];then + Server=`hostname` +fi +IP=`hostname -i` +USERNAME=`whoami` + +status_check() +{ + if [[ "$1" == "SUPERASYNC" ]] + then + if [[ "$2" != "REMOTE_CATCHUP" ]] + then + STATEWELL_DDB=0 + fi + if echo $3 | grep -Ei "maxdb|tridb|bludb" >/dev/null + then + if [[ "$4" == "$5" ]] + then + echo "The Standby database(DDB/AUX) ARCH number is $4 at ${DATETIME}" >> $For_audit + echo "The Primary database ARCH number is $5 at ${DATETIME}" >> $For_audit + echo "Primary and Standby databases(DDB/AUX) are in sync" >> $For_audit + else + echo "The Standby database(DDB/AUX) ARCH number is $4 at ${DATETIME}" >> $For_audit + echo "The Primary database ARCH number is $5 at ${DATETIME}" >> $For_audit + echo "Primary and Standby databases(DDB/AUX) are out of sync" >> $For_audit + fi + fi + else + if [[ "$2" != "PEER" ]] + then + STATEWELL=0 + fi + if echo $3 | grep -Ei "maxdb|tridb|bludb" >/dev/null + then + if [[ "$4" == "$5" ]] + then + echo "The Standby database ARCH number is $4 at ${DATETIME}" >> $For_audit + echo "The Primary database ARCH number is $5 at ${DATETIME}" >> $For_audit + echo "Primary and Standby databases are in sync" >> $For_audit + else + echo "The Standby database ARCH number is $4 at ${DATETIME}" >> $For_audit + echo "The Primary database ARCH number is $5 at ${DATETIME}" >> $For_audit + echo "Primary and Standby databases are out of sync" >> $For_audit + fi + fi + fi +} +flag_instance=0 +instance_status=`db2gcf -s | grep "Available" | wc -l` +if [[ "$instance_status" != "1" ]] +then + flag_instance=1 + if [[ ! 
-f ${HOME}/.NOSEND_$instance ]] + then + des="$Server instance is not active" + longdes="instance is not active. $Server $IP" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +cat << ! >curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! +SLACK_NOTIFY +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl_$database.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + echo " ----------------- " >>$For_audit + echo "HADR is not active at ${DATETIME}" >> $For_audit + touch ${HOME}/.NOSEND_$instance + fi + exit +fi + +#dbs=(`db2 list db directory | grep -B 5 Indirect | grep "Database name" | cut -d= -f2`) +#for i in ${dbs[*]} +#do + i=$1 + database=${i} + STATEWELL=1 + STATEWELL_DDB=1 + flag_db=0 + db2pd -db $i -hadr >$HADRMON + inactive=`cat $HADRMON | grep -E "HADR is not active|not activated" | wc -l` + if [[ "$inactive" == "1" ]] + then + flag_db=1 + if [[ ! 
-f ${HOME}/.NOSEND_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR is not active" >> $Maillog + #echo "HADR is not active. $Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR is not active" + longdes="HADR is not active. $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +SLACK_NOTIFY +cat << ! >curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! 
+if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + touch ${HOME}/.NOSEND_$i + if echo $i | grep -Ei "maxdb|tridb|bludb" >/dev/null + then + echo " ----------------- " >>$For_audit + echo "HADR is not active at ${DATETIME}" >> $For_audit + fi + fi + #continue + exit + fi + + mutitarget=`db2 get db cfg for $i | grep HADR_TARGET_LIST | grep "|" | wc -l` + hadr_role=`db2 get db cfg for $i | grep "HADR database role" | cut -d= -f2 | sed 's/ //g'` + if [[ "$mutitarget" == "1" ]] && [[ "$hadr_role" == "PRIMARY" ]] + then + hadr_syncmode=`cat $HADRMON | grep HADR_SYNCMODE | head -1 | cut -d= -f2 | sed 's/ //g'` + if [[ ! -n $hadr_syncmode ]] + then + hadr_syncmode=`db2 get db cfg for $i | grep HADR_SYNCMODE | cut -d= -f2 | sed 's/ //g'` + fi + hadr_state_sdb=`cat $HADRMON | grep HADR_STATE | head -1 | cut -d= -f2 | sed 's/ //g'` + hadr_flags_sdb=`cat $HADRMON | grep HADR_FLAGS | head -1 | cut -d= -f2` + hadr_flags_error_sdb=`cat $HADRMON | grep HADR_FLAGS | head -1 | grep -iE "ERROR|FULL|BLOCKED" | wc -l` + standby_error_time_sdb=`cat $HADRMON | grep STANDBY_ERROR_TIME | head -1 | cut -d= -f2` + primary_log_file=`cat $HADRMON | grep "PRIMARY_LOG_FILE" | head -1 | cut -d= -f2 | cut -d. -f1 | sed 's/ //'` + standby_log_file=`cat $HADRMON | grep "STANDBY_LOG_FILE" | head -1 | cut -d= -f2 | cut -d. 
-f1 | sed 's/ //'` + + if echo $i | grep -Ei "maxdb|tridb|bludb" >/dev/null + then + echo " ----------------- " >> $For_audit + fi + status_check $hadr_syncmode $hadr_state_sdb $i $standby_log_file $primary_log_file + hadr_syncmode=`cat $HADRMON | grep HADR_SYNCMODE | tail -1 | cut -d= -f2 | sed 's/ //g'` + if [[ ! -n $hadr_syncmode ]] + then + hadr_syncmode=`db2 get db cfg for $i | grep HADR_SYNCMODE | cut -d= -f2 | sed 's/ //g'` + fi + hadr_state_ddb=`cat $HADRMON | grep HADR_STATE | tail -1 | cut -d= -f2 | sed 's/ //g'` + hadr_flags_ddb=`cat $HADRMON | grep HADR_FLAGS | tail -1 | cut -d= -f2` + hadr_flags_error_ddb=`cat $HADRMON | grep HADR_FLAGS | tail -1 | grep -iE "ERROR|FULL|BLOCKED" | wc -l` + standby_error_time_ddb=`cat $HADRMON | grep STANDBY_ERROR_TIME | tail -1 | cut -d= -f2` + primary_log_file=`cat $HADRMON | grep "PRIMARY_LOG_FILE" | tail -1 | cut -d= -f2 | cut -d. -f1 | sed 's/ //'` + standby_log_file=`cat $HADRMON | grep "STANDBY_LOG_FILE" | tail -1 | cut -d= -f2 | cut -d. -f1 | sed 's/ //'` + status_check $hadr_syncmode $hadr_state_ddb $i $standby_log_file $primary_log_file + flag_db_sdb=0 + flag_db_ddb=0 + if [[ "$STATEWELL" == "0" ]] + then + flag_db_sdb=1 + if [[ ! -f ${HOME}/.NOSEND_sdb_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR is out of sync" >> $Maillog + #echo "HADR(SDB) is out of sync. HADR_STATE:$hadr_state_sdb $Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR is out of sync" + longdes="HADR(SDB) is out of sync. HADR_STATE:$hadr_state_sdb $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +SLACK_NOTIFY +cat << ! 
>curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + touch ${HOME}/.NOSEND_sdb_$i + fi + fi + if [[ "$STATEWELL_DDB" == "0" ]] + then + flag_db_ddb=1 + if [[ ! -f ${HOME}/.NOSEND_ddb_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR is out of sync" >> $Maillog + #echo "HADR(DDB/AUX) is out of sync. HADR_STATE:$hadr_state_ddb $Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR is out of sync" + longdes="HADR(DDB/AUX) is out of sync. HADR_STATE:$hadr_state_ddb $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +SLACK_NOTIFY +cat << ! 
>curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + touch ${HOME}/.NOSEND_ddb_$i + fi + #continue + exit + fi + if [[ "$hadr_flags_error_sdb" == "1" ]] + then + flag_db_sdb=1 + if [[ ! -f ${HOME}/.NOSEND_sdb_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR(SDB) eror $hadr_flags_sdb" >> $Maillog + #echo "HADR(SDB) eror $hadr_flags_sdb occured at $standby_error_time_sdb." >> $Maillog + #echo "$Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR(SDB) eror $hadr_flags_sdb" + longdes="HADR(SDB) eror $hadr_flags_sdb occured at $standby_error_time_sdb. $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +SLACK_NOTIFY +cat << ! 
>curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + touch ${HOME}/.NOSEND_sdb_$i + fi + fi + if [[ "$hadr_flags_error_ddb" == "1" ]] + then + flag_db_ddb=1 + if [[ ! -f ${HOME}/.NOSEND_ddb_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR(DDB/AUX) eror $hadr_flags_ddb" >> $Maillog + #echo "HADR(DDB/AUX) eror $hadr_flags_ddb occured at $standby_error_time_ddb." >> $Maillog + #echo "$Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR(DDB/AUX) eror $hadr_flags_ddb" + longdes="HADR(DDB/AUX) eror $hadr_flags_ddb occured at $standby_error_time_ddb. $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +SLACK_NOTIFY +cat << ! 
>curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + touch ${HOME}/.NOSEND_ddb_$i + fi + fi + else + hadr_syncmode=`cat $HADRMON | grep HADR_SYNCMODE | cut -d= -f2 | sed 's/ //g'` + if [[ ! -n $hadr_syncmode ]] + then + hadr_syncmode=`db2 get db cfg for $i | grep HADR_SYNCMODE | cut -d= -f2 | sed 's/ //g'` + fi + hadr_state=`cat $HADRMON | grep HADR_STATE | cut -d= -f2 | sed 's/ //g'` + hadr_flags=`cat $HADRMON | grep HADR_FLAGS | cut -d= -f2` + hadr_flags_error=`cat $HADRMON | grep HADR_FLAGS | grep -iE "ERROR|FULL|BLOCKED" | wc -l` + standby_error_time=`cat $HADRMON | grep STANDBY_ERROR_TIME | cut -d= -f2` + primary_log_file=`cat $HADRMON | grep "PRIMARY_LOG_FILE" | cut -d= -f2 | cut -d. -f1 | sed 's/ //'` + standby_log_file=`cat $HADRMON | grep "STANDBY_LOG_FILE" | cut -d= -f2 | cut -d. 
-f1 | sed 's/ //'` + if echo $i | grep -Ei "maxdb|tridb|bludb" >/dev/null + then + echo " ----------------- " >> $For_audit + fi + status_check $hadr_syncmode $hadr_state $i $standby_log_file $primary_log_file + flag_db=0 + if [[ "$STATEWELL" == "0" ]] || [[ "$STATEWELL_DDB" == "0" ]] + then + flag_db=1 + if [[ ! -f ${HOME}/.NOSEND_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR is out of sync" >> $Maillog + #echo "HADR is out of sync. HADR_STATE:$hadr_state $Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR is out of sync" + longdes="HADR is out of sync. HADR_STATE:$hadr_state $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi +SLACK_NOTIFY +cat << ! >curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! 
+if [[ -n "${ICD_AUTH_KEY}" ]]; then + # /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + # mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog +fi + touch ${HOME}/.NOSEND_$i + fi + #continue + exit + fi + if [[ "$hadr_flags_error" == "1" ]] + then + flag_db=1 + if [[ ! -f ${HOME}/.NOSEND_$i ]] + then + #echo "TO: cds-incident@inotes.cdstest.mro.com" > $Maillog + #echo "From: $Server" >> $Maillog + #echo "Subject: $USERNAME^HADR^$Server^4^HADR eror $hadr_flags" >> $Maillog + #echo "HADR eror $hadr_flags occured at $standby_error_time." >> $Maillog + #echo "$Server $IP database:$i" >> $Maillog + #cat $Maillog | /usr/lib/sendmail -t + des="$Server HADR eror $hadr_flags" + longdes="HADR eror $hadr_flags occured at $standby_error_time. $Server $IP database:$i" + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi + +SLACK_NOTIFY + +cat << ! >curl.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"DB2INST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"${Server:0:50}", + "hstype":"HADR" + }' +! 
+if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash curl.sh > .curl.out 2>&1 + grep -v Received .curl.out | grep -v Dload | grep -v "\-\-:\-\-:\-\-" >$Maillog + if [ -s $Maillog ]; then + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog + fi +else + echo "###################################################" >>$Maillog + echo $longdes >>$Maillog + mail -s "$Server HADR error, but failed to create task through OSLC API" `cat $Mail_recp` < $Maillog +fi + touch ${HOME}/.NOSEND_$i + fi + fi + fi + + if [[ -f ${HOME}/.NOSEND_$i ]] && [[ "$flag_db" == "0" ]] + then + rm ${HOME}/.NOSEND_$i + fi + if [[ -f ${HOME}/.NOSEND_sdb_$i ]] && [[ "$flag_db_sdb" == "0" ]] + then + rm ${HOME}/.NOSEND_sdb_$i + fi + if [[ -f ${HOME}/.NOSEND_ddb_$i ]] && [[ "$flag_db_ddb" == "0" ]] + then + rm ${HOME}/.NOSEND_ddb_$i + fi +#done + +if [[ -f ${HOME}/.NOSEND_$instance ]] && [[ "$flag_instance" == "0" ]] +then + rm ${HOME}/.NOSEND_$instance +fi diff --git a/instance-applications/120-ibm-db2u-database/files/OwnerCheck.txt b/instance-applications/120-ibm-db2u-database/files/OwnerCheck.txt new file mode 100644 index 000000000..41336d163 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/OwnerCheck.txt @@ -0,0 +1,32 @@ +#### Fix invalid object owners + +if [ ! -d /mnt/backup/bin/SQL ] ; then + mkdir -p /mnt/backup/bin/SQL + echo "${DATETIME}:Creating directory /mnt/backup/bin/SQL" + if [ $? != "0" ] ; then + echo "${DATETIME}: ERROR: Unable to create directory /mnt/backup/bin/SQL" + exit 1 + fi +fi + + + + + + +USER=MAXIMO +db2 "select 'transfer ownership of table MAXIMO.' || tabname || ' to user ${USER} PRESERVE PRIVILEGES;' from syscat.tables where tabschema = 'MAXIMO' and owner != 'MAXIMO'" | grep transfer > ./SQL/Table_ownership.sql + +db2 "select 'transfer ownership of procedure maximo.' 
|| PROCNAME || ' to user ${USER} PRESERVE PRIVILEGES;' from syscat.procedures where procschema = 'MAXIMO' and definer != 'MAXIMO'" | grep transfer > ./SQL/Proc_ownership.sql
+
+db2 "select 'transfer ownership of trigger maximo.' || trigname || ' to user ${USER} PRESERVE PRIVILEGES;' from syscat.triggers where trigschema = 'MAXIMO' and (owner != 'MAXIMO' or definer != 'MAXIMO')" | grep transfer > ./SQL/Trigger_ownership.sql
+
+db2 "select 'transfer ownership of sequence maximo.' || seqname || ' to user ${USER} PRESERVE PRIVILEGES;' from syscat.sequences where seqschema = 'MAXIMO' and owner != 'MAXIMO'" | grep transfer > ./SQL/Seq_ownership.sql
+
+db2 "select 'transfer ownership of view maximo.' || viewname || ' to user maximo;' from syscat.views where viewschema = 'MAXIMO' and owner != 'MAXIMO'" | grep transfer > ./SQL/View_ownership.sql
+
+for x in `ls -1 ./SQL`
+ do
+db2 -tvf ./SQL/${x} | tee ./SQL/${x}.OUT
+done
+
diff --git a/instance-applications/120-ibm-db2u-database/files/PostBackFlow.sh b/instance-applications/120-ibm-db2u-database/files/PostBackFlow.sh
new file mode 100755
index 000000000..ee57e46aa
--- /dev/null
+++ b/instance-applications/120-ibm-db2u-database/files/PostBackFlow.sh
@@ -0,0 +1,32 @@
+
+INSTOWNER=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | awk -F ',' '{print $4}' `
+# Resolving the Administration Server owner:
+
+# Find the home directory
+INSTHOME=` cat /etc/passwd | grep ${INSTOWNER} | cut -d: -f6`
+
+DEST_USER=`db2 list applications show detail | cut -d" " -f1 | grep -v DB2INST1 | grep -v CTGINST1 | grep -v CONNECT |grep MANA | grep -v "\-\-" |grep -v MONITOR | sort -n | uniq | tail -1`
+SCRIPT=/mnt/backup/bin/PostBF_Scripts.sh
+
+ echo "db2 connect to bludb" > ${SCRIPT}
+ echo "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,CREATE_NOT_FENCED_ROUTINE,IMPLICIT_SCHEMA,LOAD,CREATE_EXTERNAL_ROUTINE,QUIESCE_CONNECT ON DATABASE TO USER ${DEST_USER}" >> ${SCRIPT}
+ echo "db2 GRANT USE OF TABLESPACE MAXDATA TO USER ${DEST_USER}" 
>> ${SCRIPT} + echo "db2 GRANT DBADM WITHOUT DATAACCESS WITHOUT ACCESSCTRL ON DATABASE TO USER ${DEST_USER}" >> ${SCRIPT} + echo "db2 GRANT SECADM ON DATABASE TO USER ${DEST_USER}" >> ${SCRIPT} + echo "db2 GRANT DATAACCESS ON DATABASE TO USER ${DEST_USER} " >> ${SCRIPT} + echo "db2 GRANT ACCESSCTRL ON DATABASE TO USER ${DEST_USER}" >> ${SCRIPT} + echo "db2 GRANT ACCESSCTRL ON DATABASE TO USER ${DEST_USER} " >> ${SCRIPT} + +### Update KAFKA + echo "db2 \"update maximo.MSGHUBPROVIDERCFG set PROPVALUE=null where propname='BOOTSTRAPSERVERS' and provider='KAFKA'\" " >> ${SCRIPT} + echo "db2 \"update maximo.MSGHUBPROVIDERCFG set PROPVALUE=null where propname='PASSWORD' and provider='KAFKA'\" " >> ${SCRIPT} + echo "db2 \"commit\" " >> ${SCRIPT} + echo " " + echo " " + cat ${INSTHOME}/Managed/OwnerCheck.txt >> ${SCRIPT} + + echo "db2set db2comm=tcpip,ssl" >> ${SCRIPT} + echo "db2stop force ; ipclean ; db2start" >> ${SCRIPT} + + +chmod 755 ${SCRIPT} diff --git a/instance-applications/120-ibm-db2u-database/files/RUNEXPORT.sh b/instance-applications/120-ibm-db2u-database/files/RUNEXPORT.sh new file mode 100755 index 000000000..b89440654 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/RUNEXPORT.sh @@ -0,0 +1,30 @@ +#!/bin/bash +################################################################################ +# THIS NEEDS TO BE RUN AS INSTANCE OWNER. +################################################################################ +instance=`whoami` +instance_home=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${instance}" | awk -F ',' '{print $5}'| cut -d/ -f 1,2,3` +if [ -f "$instance_home/sqllib/db2profile" ] +then + . $instance_home/sqllib/db2profile +fi +if [ -s "$instance_home/Scripts/.PROPS" ] +then + . $instance_home/Scripts/.PROPS +fi +db=BLUDB +DATETIME=`date +%Y%m%d_%H%M%S` + +if [ ! 
-d /mnt/backup/db2inst1/Exports ] + then + mkdir -p /mnt/backup/db2inst1/Exports +fi + + export_path=/mnt/backup/db2inst1/Exports +mkdir -p ${export_path}/${db}_${DATETIME} +cd ${export_path}/${db}_${DATETIME} +db2move $db export -l ./lobs/ +db2look -d $db -e -l -x -o $db.ddl +cd .. +zip -r -q -o ${db}_${DATETIME}.zip ./${db}_${DATETIME} +echo -e "All of the files are saved under folder ${export_path}/${db}_${DATETIME} and compressed into file ${export_path}/${db}_${DATETIME}.zip. \nPlease remember to remove folder ${export_path}/${db}_${DATETIME} if it's not needed any more." diff --git a/instance-applications/120-ibm-db2u-database/files/RUN_OnDemandFULL_BKP.sh b/instance-applications/120-ibm-db2u-database/files/RUN_OnDemandFULL_BKP.sh new file mode 100755 index 000000000..f8b97fd73 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/RUN_OnDemandFULL_BKP.sh @@ -0,0 +1,103 @@ +#!/bin/bash +#set -x + +######################################################### +# Run_Backup.sh +# Run_Backup.sh will be called from the Cron Jobs +# This script will list all local databases running in the instance on a node. It will call the +# DB2_Backup.sh script to run a backup for each running database. +# Variables are set at the top of the DB2_Backup.sh script to determine if a full backup needs to be run +# based on the day of the week. Currently, Saturday is when the full backup runs, incremental backups run +# every all other days. 
+# +# Variables to be set +# SLACKURL = The channel were notifications are send +# BACKUP_SCRIPT = The backup script that Run_Backup.sh calls +# DAYOFFULL = Defines the day of the week that the full backup will on on (must match the same format as the output from `date`) +# NUMOFBKUPTOKEEP = This defines the number of days to keep a backup image on local disk +# +# Variables determined by the environment +# BACKUPTYPE = Is determined from the `date` command and the DAYOFFULL value +# DB2INSTANCE = Pulled from the environment +# HOSTNAME +# DBNAME = Pulled from the `db2 list db directory` +# +# Backup command issued +# ./DB2_Backup.sh ${DB2INSTANCE} ${DBNAME} ${NUMOFBKUPTOKEEP} ${BACKUPTYPE} 2>>.BackupLOG.stderr > .BackupLOG.out +######################################################### + +. /mnt/backup/bin/.PROPS + +DBINSTANCE=`whoami` +HOSTNAME=`hostname` +BACKUP_DIR=${HOME}/bin +BACKUP_SCRIPT=DB2_Backup.sh +DATETIME=`date +%Y-%m-%d_%H%M%S`; + +if [ ! -f "${HOME}/sqllib/db2profile" ] +then + echo "ERROR - ${HOME}/sqllib/db2profile not found" + EXIT_STATUS=1 +else + . ${HOME}/sqllib/db2profile +fi + + +DOW=`date | awk '{print $1}'` + +# if [ ${DOW} = ${DAYOFFULL} ] ; then + BACKUPTYPE=full +# else +# BACKUPTYPE=inc +# fi + +DBS=`db2 list db directory | grep -B5 "Indirect" | grep "Database name" | awk '{ print $4 }'` +for DBNAME in ${DBS} +do + cd ${BACKUP_DIR} + ./DB2_Backup.sh ${DB2INSTANCE} ${DBNAME} ${NUMOFBKUPTOKEEP} ${BACKUPTYPE} 2>.BackupLOG.stderr > .BackupLOG.out + + RC=$? + if [ ${RC} -ne 0 ]; then + + longdes="Failure to start the Backup job ${DATETIME} CUST=${CUSTNAME} ${RC}" + ## Send Failure notification to a slack channel ## + cat << ! >.curl_${DBNAME}_RUN.sh + curl -X POST -H 'Content-type: application/json' --data '{"text":"$longdes"}' $SLACKURL +! 
+/bin/bash .curl_${DBNAME}_RUN.sh > .curl_${DBNAME}_RUN.out 2>&1 + + ##### Create ICD Incident #### + ####### If Backup fails ### + des="${DBINSTANCE} - Backup - ${HOSTNAME} ${DBNAME} ${CUSTNAME} - MASMS -- Backup Failed" + echo "TESTING $instance - Backup - $ ${DBNAME} - Backup Failed" > .Maillive.log + echo "############################" >> .Maillive.log + #cat $BACK_LOG >> .Maillive.log + longdes=`cat .Maillive.log | sed 's/"//g' | sed "s/'//g"` + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi + +cat << ! >.curl_${DBNAME}_ICD.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"CTGINST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"{servicedesk-pdb-sjc03-2.cds.mro.com:0:50}", + "hstype":"BACKUP" + }' +! 
+#####/bin/bash .curl_${DBNAME}_ICD.sh > .curl_${DBNAME}_ICD.out 2>&1 + +fi +done diff --git a/instance-applications/120-ibm-db2u-database/files/Reg-Large_TBSP.sh b/instance-applications/120-ibm-db2u-database/files/Reg-Large_TBSP.sh new file mode 100755 index 000000000..8ba4e5896 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/Reg-Large_TBSP.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -x +db2set db2comm= + +db2stop force ; ipclean ; db2start +sleep 60 +db2 connect to bludb + + +DDT=`db2 -x "select rtrim(DATATYPE) from SYSCAT.TABLESPACES where TBSPACE='MAXDATA' " ` +IDT=`db2 -x "select rtrim(DATATYPE) from SYSCAT.TABLESPACES where TBSPACE='MAXINDEX' "` + +if [[ "${DDT}" != "L" && "${IDT}" != "L" ]]; then + + echo "Converting MAXDATA and MAXINDEX from Regular to Large" + db2 "SELECT char(TBSPACE,20) TBSPACE, case DATATYPE when 'A' then 'Regular' when 'L' then 'Large' end as DATATYPE FROM SYSCAT.TABLESPACES WHERE TBSPACE in ('MAXDATA','MAXINDEX')" + + db2 alter tablespace MAXDATA convert to large + db2 alter tablespace MAXINDEX convert to large + +elif [[ "${DDT}" != "L" && "${IDT}" == "L" ]]; then + + echo "Converting MAXDATA from Regular to Large " + db2 "SELECT char(TBSPACE,20) TBSPACE, case DATATYPE when 'A' then 'Regular' when 'L' then 'Large' end as DATATYPE FROM SYSCAT.TABLESPACES WHERE TBSPACE in ('MAXDATA','MAXINDEX')" + + db2 alter tablespace MAXDATA convert to large + +elif [[ "${IDT}" != "L" && "${DDT}" == "L" ]]; then + + echo "Converting MAXINDEX from Regular to Large " + db2 "SELECT char(TBSPACE,20) TBSPACE, case DATATYPE when 'A' then 'Regular' when 'L' then 'Large' end as DATATYPE FROM SYSCAT.TABLESPACES WHERE TBSPACE in ('MAXDATA','MAXINDEX')" + + db2 alter tablespace MAXINDEX convert to large + +else + echo "Tablespaces are already converted to Large" + db2 "SELECT char(TBSPACE,20) TBSPACE, case DATATYPE when 'A' then 'Regular' when 'L' then 'Large' end as DATATYPE FROM SYSCAT.TABLESPACES WHERE TBSPACE in ('MAXDATA','MAXINDEX')" +fi 
+ + +db2 "select 'REORG TABLE '|| RTRIM(TABSCHEMA) || '.\"' || RTRIM(tabname)||'\" ;' from syscat.tables where tabschema not like 'SYS%' and type ='T'" | grep REORG > ALL_TB_REORG.sql +db2 "select 'REORG INDEXES ALL FOR TABLE '|| RTRIM(TABSCHEMA) || '.\"' || RTRIM(tabname)||'\" ;' from syscat.tables where tabschema not like 'SYS%' and type ='T'" | grep REORG > ALL_IX_REORG.sql +db2 "select 'RUNSTATS ON TABLE '|| RTRIM(TABSCHEMA) || '.\"' || RTRIM(tabname)||'\" ON ALL COLUMNS WITH DISTRIBUTION ON ALL COLUMNS AND DETAILED INDEXES ALL;' from syscat.tables where tabschema not like 'SYS%' and type ='T'" | grep RUNSTATS > ALL_TB_RUNSTATS.sql + +db2 -tvf ALL_TB_REORG.sql | tee ALL_TB_REORG.OUT +db2 -tvf ALL_IX_REORG.sql | tee ALL_IX_REORG.OUT +db2 -tvf ALL_TB_RUNSTATS.sql | tee ALL_TB_RUNSTATS.OUT + +db2 alter tablespace MAXDATA reduce max +db2 alter tablespace MAXINDEX reduce max + +db2set db2comm=TCPIP,SSL +db2stop force ; ipclean ; db2start diff --git a/instance-applications/120-ibm-db2u-database/files/Run_Backup.sh b/instance-applications/120-ibm-db2u-database/files/Run_Backup.sh new file mode 100755 index 000000000..72f29f0c8 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/Run_Backup.sh @@ -0,0 +1,109 @@ +#!/bin/bash +#set -x + +######################################################### +# Run_Backup.sh +# Run_Backup.sh will be called from the Cron Jobs +# This script will list all local databases running in the instance on a node. It will call the +# DB2_Backup.sh script to run a backup for each running database. +# Variables are set at the top of the DB2_Backup.sh script to determine if a full backup needs to be run +# based on the day of the week. Currently, Saturday is when the full backup runs, incremental backups run +# every all other days. 
+# +# Variables to be set +# SLACKURL = The channel were notifications are send +# BACKUP_SCRIPT = The backup script that Run_Backup.sh calls +# DAYOFFULL = Defines the day of the week that the full backup will on on (must match the same format as the output from `date`) +# NUMOFBKUPTOKEEP = This defines the number of days to keep a backup image on local disk +# +# Variables determined by the environment +# BACKUPTYPE = Is determined from the `date` command and the DAYOFFULL value +# DB2INSTANCE = Pulled from the environment +# HOSTNAME +# DBNAME = Pulled from the `db2 list db directory` +# +# Backup command issued +# ./DB2_Backup.sh ${DB2INSTANCE} ${DBNAME} ${NUMOFBKUPTOKEEP} ${BACKUPTYPE} 2>>.BackupLOG.stderr > .BackupLOG.out +######################################################### + +. /mnt/backup/bin/.PROPS + +DBINSTANCE=`whoami` +HOSTNAME=`hostname` +BACKUP_DIR=${HOME}/bin +BACKUP_SCRIPT=DB2_Backup.sh +DATETIME=`date +%Y-%m-%d_%H%M%S`; + +if [ ! -f "${HOME}/sqllib/db2profile" ] +then + echo "ERROR - ${HOME}/sqllib/db2profile not found" + EXIT_STATUS=1 +else + . ${HOME}/sqllib/db2profile +fi + + +DOW=`date | awk '{print $1}'` + + if [ ${DOW} = ${DAYOFFULL} ] ; then + BACKUPTYPE=full + else + BACKUPTYPE=inc + fi + +DBS=`db2 list db directory | grep -B5 "Indirect" | grep "Database name" | awk '{ print $4 }'` +for DBNAME in ${DBS} +do + cd ${BACKUP_DIR} + ./DB2_Backup.sh ${DB2INSTANCE} ${DBNAME} ${NUMOFBKUPTOKEEP} ${BACKUPTYPE} 2>.BackupLOG.stderr > .BackupLOG.out + + RC=$? + if [ ${RC} -ne 0 ]; then + + longdes="Failure to start the Backup job ${DATETIME} CUST=${CUSTNAME} ${RC}" + ## Send Failure notification to a slack channel ## + cat << ! >.curl_${DBNAME}_RUN.sh + if [[ -n "${SLACKURL}" ]]; then + curl -X POST -H 'Content-type: application/json' --data '{"text":"$longdes"}' $SLACKURL + fi +! 
+/bin/bash .curl_${DBNAME}_RUN.sh > .curl_${DBNAME}_RUN.out 2>&1 + + ##### Create ICD Incident #### + ####### If Backup fails ### + des="${DBINSTANCE} - Backup - ${HOSTNAME} ${DBNAME} ${CUSTNAME} - MASMS -- Backup Failed" + echo "TESTING $instance - Backup - $ ${DBNAME} - Backup Failed" > .Maillive.log + echo "############################" >> .Maillive.log + #cat $BACK_LOG >> .Maillive.log + longdes=`cat .Maillive.log | sed 's/"//g' | sed "s/'//g"` + ICD_URL="https://servicedesk.mro.com" + if ! curl -k -s --connect-timeout 3 ${ICD_URL} >/dev/null; then + ICD_URL="https://servicedesk.cds.mro.com" + fi + +cat << ! >.curl_${DBNAME}_ICD.sh + curl --insecure --location --request POST "${ICD_URL}/maximo_mif/oslc/os/hsincident?lean=1" \ + --header "Authorization: Basic ${ICD_AUTH_KEY}" \ + --header 'Content-Type: application/json' \ + --data '{ + "description":"$des", + "reportedpriority":4, + "internalpriority":4, + "reportedby":"DB2", + "affectedperson":"CTGINST1", + "description_longdescription":"$longdes", + "siteid":"001", + "classstructureid":"1341", + "classificationid":"IN-DBPERF", + "hshost":"{servicedesk-pdb-sjc03-2.cds.mro.com:0:50}", + "hstype":"BACKUP" + }' +! +if [[ -n "${ICD_AUTH_KEY}" ]]; then + /bin/bash .curl_${DBNAME}_ICD.sh > .curl_${DBNAME}_ICD.out 2>&1 +fi + +fi +done +/bin/bash ${HOME}/bin/runstats_rebind.sh >${HOME}/bin/.runstats_rebind.out 2>&1 +/bin/bash ${HOME}/bin/grant_check.sh bludb >${HOME}/bin/.grant_check.out 2>&1 diff --git a/instance-applications/120-ibm-db2u-database/files/Set_DB_COS_Storage.sh b/instance-applications/120-ibm-db2u-database/files/Set_DB_COS_Storage.sh new file mode 100755 index 000000000..b3847c64a --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/Set_DB_COS_Storage.sh @@ -0,0 +1,7 @@ +. /mnt/backup/bin/.PROPS +if db2 list storage access | grep AWSCOS; then + echo "AWSCOS is available already." +else + echo "AWSCOS is not available. 
Creating"
+ db2 catalog storage access alias AWSCOS VENDOR S3 server ${SERVER} user ${PARM1} password ${PARM2} container ${CONTAINER}
+fi
\ No newline at end of file
diff --git a/instance-applications/120-ibm-db2u-database/files/cronRunBKP.sh b/instance-applications/120-ibm-db2u-database/files/cronRunBKP.sh
new file mode 100755
index 000000000..0c0dbda30
--- /dev/null
+++ b/instance-applications/120-ibm-db2u-database/files/cronRunBKP.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -x
+
+date
+DATETIME=`date +%Y-%m-%d_%H%M%S`;
+export COSBACKUPBUCKET=$1
+export CUSTNAME=$2
+export CUSTNAME=${CUSTNAME:-"Please Set"}
+sudo -E -u db2inst1 echo "${COSBACKUPBUCKET}, ${CUSTNAME}, ${DATETIME}" >> /mnt/backup/bin/Bucket
 #sudo --preserve-env=COSBACKUPBUCKET -u db2inst1 /mnt/blumeta0/home/db2inst1/bin/Run_Backup.sh
+sudo -E -u db2inst1 /mnt/blumeta0/home/db2inst1/bin/Run_Backup.sh
+date
diff --git a/instance-applications/120-ibm-db2u-database/files/dropTempts.sh b/instance-applications/120-ibm-db2u-database/files/dropTempts.sh
new file mode 100644
index 000000000..37580a723
--- /dev/null
+++ b/instance-applications/120-ibm-db2u-database/files/dropTempts.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# ***************************************************************************
+#
+# Author : Prudhviraj P
+# Email : prudhvirajp@ibm.com
+# Date : 15-11-2024
+# Description : Script to drop TEMPTS and associated tablespaces and
+# recreate tablespaces with IBM default storage group
+#
+# *************** THIS NEEDS TO BE RUN AS INSTANCE OWNER ***************
+#
+# Revision history:
+# 15-11-2024 Prudhviraj P
+# Original version
+#
+# ***************************************************************************
+# USAGE :
+# dropTempts.sh
+#
+# ***************************************************************************
+
+# -- ** DEBUG MODE **
+# set -x -- Enabling Debugging Mode
+# set -n -- To verify syntax errors
+
+# -- Script Execution starts here
+
+ST=$(date +%s)
+
+# -- Declaring Parameters
+DBNAME="BLUDB" 
+TMPSQL="/tmp/droprecreatetbsp.sql"
+
+# -- Invoking DB2 Profile
+
+INST=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | awk -F, '{print $4}'`
+INSTHOME=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${INST}" | awk -F ',' '{print $5}'| cut -d/ -f 1,2,3,4,5`
+
+. ${INSTHOME}/sqllib/db2profile
+
+# -- Validate the DB is cataloged
+
+DBS=`db2 list db directory | grep -E "Database alias" | awk -F '= ' '{print $2}'`
+
+if grep -qw "${DBNAME}" <<< "${DBS}" ; then
+
+ :
+else
+ echo "${DBNAME} is Not CATALOGED or Not FOUND!!!"
+ exit 1;
+fi
+
+# -- Generate the Script for TEMPTS removal and recreate tablespaces
+
+db2 "CONNECT TO ${DBNAME}" > /dev/null
+
+TMPSTG=`db2 -x "SELECT VARCHAR(STORAGE_GROUP_NAME,20) FROM TABLE(ADMIN_GET_STORAGE_PATHS('',-1)) AS T WHERE STORAGE_GROUP_NAME like 'IBMDB2U%'" `
+TMPTBS=`db2 -x "SELECT VARCHAR(TBSP_NAME,30) FROM table (MON_GET_TABLESPACE('', -2)) WHERE TBSP_CONTENT_TYPE IN ('USRTEMP','SYSTEMP') and STORAGE_GROUP_NAME = '${TMPSTG}' " `
+if [[ ! -z ${TMPSTG} ]]; then
+
+ echo "Tablespaces associated with \"${TMPSTG}\" in \"${DBNAME}\" ";
+ db2 "SELECT char(TBSP_NAME,20) TBSP_NAME, char(STORAGE_GROUP_NAME,20) STORAGE_GROUP_NAME FROM table (MON_GET_TABLESPACE('', -2)) WHERE TBSP_USING_AUTO_STORAGE = 1 AND TBSP_CONTENT_TYPE IN ('ANY','LARGE', 'USRTEMP','SYSTEMP') ORDER BY STORAGE_GROUP_NAME"
+
+ echo "CONNECT TO ${DBNAME};" > ${TMPSQL}
+ for TS in ${TMPTBS} ; do
+
+ echo "DROP TABLESPACE \"${TS}\";" >> ${TMPSQL}
+ echo "CREATE TEMPORARY TABLESPACE \"${TS}\" IN DATABASE PARTITION GROUP IBMTEMPGROUP MANAGED BY AUTOMATIC STORAGE USING STOGROUP \"IBMSTOGROUP\" ;" >> ${TMPSQL}
+
+ done
+
+ echo "DROP STOGROUP \"${TMPSTG}\"; " >> ${TMPSQL}
+ echo "COMMIT WORK; " >> ${TMPSQL}
+ echo "CONNECT RESET; " >> ${TMPSQL}
+ echo "TERMINATE; " >> ${TMPSQL}
+else
+ echo "NO TEMP STORAGE Found. Exiting . . . 
" ; + exit 1; +fi +db2 "CONNECT RESET" > /dev/null + +# -- Execute the script to drop and recreate the TEMPTS and tablespaces + +db2 -tvf ${TMPSQL} -z ${TMPSQL}.log + +rm -rf ${TMPSQL} + +db2 "CONNECT TO ${DBNAME}" > /dev/null +echo "After removing TEMPTS \"${TMPSTG}\" from \"${DBNAME}\"" +db2 "SELECT char(TBSP_NAME,20) TBSP_NAME, char(STORAGE_GROUP_NAME,20) STORAGE_GROUP_NAME FROM table (MON_GET_TABLESPACE('', -2)) WHERE TBSP_USING_AUTO_STORAGE = 1 AND TBSP_CONTENT_TYPE IN ('ANY','LARGE', 'USRTEMP','SYSTEMP') ORDER BY STORAGE_GROUP_NAME" +db2 "CONNECT RESET" > /dev/null + +# -- END of PROCESSING + +ET=$(date +%s) +ELT=$((ET - ST)) +((sec=ELT%60, ELT/=60, min=ELT%60, hrs=ELT/60)) +DURATION=$(printf "Total Execution Time - %d Hrs : %d Mins : %d Secs" $hrs $min $sec) +echo -e "\n$DURATION" + +# -- END OF SCRIPT + diff --git a/instance-applications/120-ibm-db2u-database/files/extract_authorization.sh b/instance-applications/120-ibm-db2u-database/files/extract_authorization.sh new file mode 100755 index 000000000..911bbb78b --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/extract_authorization.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# *************************************************************************** +# Author: Fu Le Qing (Roking) +# Email: leqingfu@cn.ibm.com +# Date: 05-08-2019 +# +# Description: Extract authorizations from database +# +# ******** THIS NEEDS TO BE RUN AS INSTANCE OWNER. 
************** +# +# Revision history: +# 05-08-2019 Fu Le Qing (Roking) +# Original version +# 24-06-2024 Prudhviraj +# Added date and time to sql file +# +# *************************************************************************** +# run as below: +# To extract all of the authorizations from database: +#./extract_authorization.sh database_name +# To extract the specified users' authorizations from database: +#./extract_authorization.sh database_name user_name +# *************************************************************************** +#set -x +instance=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | awk -F ',' '{print $4}'` +#instance_home=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${instance}" | awk -F ',' '{print $5}'| cut -d/ -f 1,2,3` +instance_home=/mnt/blumeta0/home/db2inst1 +DT=`date +'%F-%H-%M'` +if [ ! -f "$instance_home/sqllib/db2profile" ] +then + echo "ERROR - $instance_home/sqllib/db2profile not found" + EXIT_STATUS=1 +else + + . 
$instance_home/sqllib/db2profile +fi + +if [[ $# != 1 ]] && [[ $# != 2 ]];then + echo "Usage: command database_name" + echo "OR" + echo "Usage: command database_name user_name" + exit +fi + +GRANTS_FILE="$instance_home/bin/grants_$DT.sql" + + +if [ -f $GRANTS_FILE ] +then + rm $GRANTS_FILE +fi + +if [[ $# == 1 ]];then + db2look -x -d $1 -o $GRANTS_FILE + echo "The authorizations are saved into file:" + echo $GRANTS_FILE + exit +fi + +if [[ $# == 2 ]];then + typeset -u user + user=$2 + db2look -x -d $1 | grep -w "TO USER \"$user" + exit +fi diff --git a/instance-applications/120-ibm-db2u-database/files/grant_check.sh b/instance-applications/120-ibm-db2u-database/files/grant_check.sh new file mode 100755 index 000000000..bf8ac3950 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/grant_check.sh @@ -0,0 +1,420 @@ +#!/bin/bash +# *************************************************************************** +# Author: Fu Le Qing (Roking) +# Email: leqingfu@cn.ibm.com +# Date: 11-28-2018 +# +# Description: This script check the privileges of role upon tables, +# grant the new tables to role, and sends an email +# to a specified email list. +# +# ******** THIS NEEDS TO BE RUN AS INSTANCE OWNER. 
************** +# +# Revision history: +# 11-28-2018 Fu Le Qing (Roking) +# Original version +# 04-24-2019 Fu Le Qing (Roking) +# Grant read-only role to the existing read user +# Grant read-write role to the existing write user +# 05-05-2019 Fu Le Qing (Roking) +# Grant usage on sequences to read role +# Grant alter on sequences to write role +# 12-23-2019 Fu Le Qing (Roking) +# Skip statistics view +# 02-28-2020 Fu Le Qing (Roking) +# Add blacklist: input table name into file blacklist without schema +# 05-13-2022 Fu Le Qing (Roking) +# Skip alias +# 10-17-2023 Fu Le Qing (Roking) +# Update for MAS +# *************************************************************************** +# run as below: +# ./grant_check.sh database_name | tee -a .grant_check.out +# *************************************************************************** +instance=`whoami` +instance_home=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${instance}" | awk -F ',' '{print $5}'| sed 's/\/sqllib//'` + +blacklist=$instance_home/Scripts/blacklist_grant + +pidfile="$instance_home/.`basename ${0}`.pid" +if [ -e ${pidfile} ] && kill -0 `cat ${pidfile}` 2>/dev/null +then + exit 0 +fi + +echo $$ > ${pidfile} +trap "rm -f ${pidfile}; exit" SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM EXIT + +if [ ! -f "$instance_home/sqllib/db2profile" ] +then + echo "ERROR - $instance_home/sqllib/db2profile not found" + EXIT_STATUS=1 +else + . $instance_home/sqllib/db2profile +fi + +if [[ $# != 1 ]];then + echo "Usage: command database_name" + exit +fi + +GRANT_TEMP="/tmp/grant_temp.sql" +Mail_recp=$instance_home/CDS/Monitoring/bin/maildba.lst +if [[ -f $instance_home/Scripts/.PROPS ]] +then + Server=`cat $instance_home/Scripts/.PROPS | grep CUSTNAME | cut -d= -f2` +else + Server=`hostname` +fi +if [ ! 
-n "$Server" ];then + Server=`hostname` +fi +IP=`hostname -i` + +if [ -f $GRANT_TEMP ] +then + rm $GRANT_TEMP +fi + +instance_status=`db2gcf -s | grep "Available" | wc -l` +if [[ "$instance_status" != "1" ]] +then + DATETIME=`date +%Y-%m-%d_%H:%M:%S` + echo "Time : ${DATETIME} Instance is down!" | tee /tmp/.grant_mail + #mail -s "Instance is down $Server $IP" `cat $Mail_recp` < /tmp/.grant_mail + rm /tmp/.grant_mail + exit +fi + +role=`db2 get db cfg for $1 | grep "HADR database role" | cut -d= -f2 |sed 's/ //g'` +if [ "$role" != "STANDBY" ]; then + db2 connect to $1 + if [ $? -eq 0 ]; then + schemalist=(`db2 connect to $1 >/dev/null;db2 -x "select SCHEMANAME from syscat.SCHEMATA where SCHEMANAME in ('MAXIMO','TRIDATA','TRIRIGADC')"`) + i=0 + while [[ $i -lt ${#schemalist[*]} ]] + do + role_read=`db2 connect to $1 >/dev/null;db2 -x "select count(*) from syscat.roles where rolename='${schemalist[$i]}_READ'" | sed 's/\.//'` + if [[ $role_read -eq 0 ]] + then + echo "create role ${schemalist[$i]}_READ;" >>$GRANT_TEMP + fi + + role_write=`db2 connect to $1 >/dev/null;db2 -x "select count(*) from syscat.roles where rolename='${schemalist[$i]}_WRITE'" | sed 's/\.//'` + if [[ $role_write -eq 0 ]] + then + echo "create role ${schemalist[$i]}_WRITE;" >>$GRANT_TEMP + fi + + db2 -x "select case when NOT exists( + select 1 + from syscat.DBAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.CONNECTAUTH='Y') + then 'GRANT CONNECT ON DATABASE TO role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + #db2 -x "select case when NOT exists( + #select 1 + #from syscat.DBAUTH auth + #where auth.GRANTEE='${schemalist[$i]}_READ' and auth.BINDADDAUTH='Y') + #then 'GRANT BINDADD ON DATABASE TO role ${schemalist[$i]}_READ;' + #else '--' + #end + #from sysibm.sysdummy1" >>$GRANT_TEMP + + #db2 -x "select case when NOT exists( + #select 1 + #from syscat.SCHEMAAUTH auth + #where auth.GRANTEE='${schemalist[$i]}_READ' and auth.CREATEINAUTH='Y') + 
#then 'GRANT CREATEIN ON schema ${schemalist[$i]} TO role ${schemalist[$i]}_READ;' + #else '--' + #end + #from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.WORKLOADAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.USAGEAUTH='Y' and auth.WORKLOADNAME='SYSDEFAULTUSERWORKLOAD') + then 'grant usage on workload sysdefaultuserworkload TO role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SYSSH200') + then 'grant execute on package nullid.SYSSH200 to role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SQLC2O27') + and exists( + select 1 + from syscat.packages where PKGNAME ='SQLC2O27' + ) + then 'grant execute on package nullid.SQLC2O27 to role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SQLC2O28') + and exists( + select 1 + from syscat.packages where PKGNAME ='SQLC2O28' + ) + then 'grant execute on package nullid.SQLC2O28 to role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SYSSH100') + then 'grant execute on package nullid.SYSSH100 to role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from 
syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_READ' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SQLC2K26') + and exists( + select 1 from syscat.packages where PKGNAME='SQLC2K26' + ) + then 'grant execute on package nullid.SQLC2K26 to role ${schemalist[$i]}_READ;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select 'GRANT USAGE ON SEQUENCE \"'|| rtrim(seq.SEQSCHEMA) ||'\".\"'|| rtrim(seq.SEQNAME) || '\" TO role ${schemalist[$i]}_READ;' + from syscat.sequences seq + left join syscat.SEQUENCEAUTH auth on seq.SEQSCHEMA=auth.SEQSCHEMA and seq.SEQNAME=auth.SEQNAME + and GRANTEE='${schemalist[$i]}_READ' and auth.USAGEAUTH='Y' and GRANTEETYPE='R' + where seq.SEQSCHEMA='${schemalist[$i]}' and auth.SEQNAME is null" >>$GRANT_TEMP + + db2 -x "select 'GRANT SELECT ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_READ;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_READ' and auth.SELECTAUTH='Y' and GRANTEETYPE='R' + where tab.TABSCHEMA='SYSCAT' and tab.TABNAME in ('SCHEMATA','TABLES','INDEXES','COLUMNS') and auth.tabname is null" >>$GRANT_TEMP + + if [ -s $blacklist ] + then + typeset -u string_read + for tab in `cat $blacklist` + do + string_read+="'"$tab"'," + done + tables=`echo $string_read | sed 's/.$//'` + db2 -x "select 'GRANT SELECT ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_READ;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_READ' and auth.SELECTAUTH='Y' and GRANTEETYPE='R' + where tab.tabschema='${schemalist[$i]}' and tab.type<>'A' and auth.tabname is null and SUBSTR(tab.PROPERTY,19,1) <>'Y' and tab.tabname not in ($tables)" >>$GRANT_TEMP + for tab in `cat $blacklist` + do + typeset -u table_name=$tab + db2 -x 
"select 'revoke SELECT ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_READ;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_READ' and SELECTAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + db2 -x "select 'revoke insert ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_READ;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_READ' and INSERTAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + db2 -x "select 'revoke update ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_READ;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_READ' and DELETEAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + db2 -x "select 'revoke delete ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_READ;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_READ' and UPDATEAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + done + else + db2 -x "select 'GRANT SELECT ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_READ;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_READ' and auth.SELECTAUTH='Y' and GRANTEETYPE='R' + where tab.tabschema='${schemalist[$i]}' and tab.type<>'A' and auth.tabname is null and SUBSTR(tab.PROPERTY,19,1) <>'Y'" >>$GRANT_TEMP + fi + + ####db2 -x "select distinct 'grant role ${schemalist[$i]}_READ to user ' ||tab.GRANTEE||';' + ####from SYSCAT.TABAUTH tab + ####left join syscat.roleauth ro on tab.GRANTEE=ro.GRANTEE and ro.ROLENAME='${schemalist[$i]}_READ' + ####where ro.rolename is null and tab.GRANTEETYPE='U' and tab.GRANTEE <>'${schemalist[$i]}_READ' and tab.SELECTAUTH='Y' and 
tab.TABSCHEMA='${schemalist[$i]}'" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.DBAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.CONNECTAUTH='Y') + then 'GRANT CONNECT ON DATABASE TO role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.DBAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.BINDADDAUTH='Y') + then 'GRANT BINDADD ON DATABASE TO role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.SCHEMAAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.CREATEINAUTH='Y') + then 'GRANT CREATEIN ON schema ${schemalist[$i]} TO role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.WORKLOADAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.USAGEAUTH='Y' and auth.WORKLOADNAME='SYSDEFAULTUSERWORKLOAD') + then 'grant usage on workload sysdefaultuserworkload TO role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SYSSH200') + then 'grant execute on package nullid.SYSSH200 to role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SQLC2O27') + and exists( + select 1 + from syscat.packages where PKGNAME ='SQLC2O27' + ) + then 'grant execute on package nullid.SQLC2O27 to role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case 
when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SQLC2O28') + and exists( + select 1 + from syscat.packages where PKGNAME ='SQLC2O28' + ) + then 'grant execute on package nullid.SQLC2O28 to role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SYSSH100') + then 'grant execute on package nullid.SYSSH100 to role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select case when NOT exists( + select 1 + from syscat.PACKAGEAUTH auth + where auth.GRANTEE='${schemalist[$i]}_WRITE' and auth.EXECUTEAUTH='Y' and auth.PKGNAME ='SQLC2K26') + and exists( + select 1 from syscat.packages where PKGNAME='SQLC2K26' + ) + then 'grant execute on package nullid.SQLC2K26 to role ${schemalist[$i]}_WRITE;' + else '--' + end + from sysibm.sysdummy1" >>$GRANT_TEMP + + db2 -x "select 'GRANT ALTER ON SEQUENCE \"'|| rtrim(seq.SEQSCHEMA) ||'\".\"'|| rtrim(seq.SEQNAME) || '\" TO role ${schemalist[$i]}_WRITE;' + from syscat.sequences seq + left join syscat.SEQUENCEAUTH auth on seq.SEQSCHEMA=auth.SEQSCHEMA and seq.SEQNAME=auth.SEQNAME + and GRANTEE='${schemalist[$i]}_WRITE' and auth.ALTERAUTH='Y' and GRANTEETYPE='R' + where seq.SEQSCHEMA='${schemalist[$i]}' and auth.SEQNAME is null" >>$GRANT_TEMP + + db2 -x "select 'GRANT SELECT ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_WRITE;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_WRITE' and auth.SELECTAUTH='Y' and GRANTEETYPE='R' + where tab.TABSCHEMA='SYSCAT' and tab.type<>'A' and tab.TABNAME in ('SCHEMATA','TABLES','INDEXES','COLUMNS') and 
auth.tabname is null" >>$GRANT_TEMP + + if [ -s $blacklist ] + then + typeset -u string_write + for tab in `cat $blacklist` + do + string_write+="'"$tab"'," + done + tables=`echo $string_write | sed 's/.$//'` + db2 -x "select 'GRANT SELECT,insert, update, delete ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_WRITE;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_WRITE' and auth.SELECTAUTH='Y' and auth.INSERTAUTH='Y' and auth.DELETEAUTH='Y' and auth.UPDATEAUTH ='Y' and GRANTEETYPE='R' + where tab.tabschema='${schemalist[$i]}' and tab.type<>'A' and auth.tabname is null and SUBSTR(tab.PROPERTY,19,1) <>'Y' and tab.tabname not in ($tables) and tab.type <> 'V'" >>$GRANT_TEMP + db2 -x "select 'GRANT SELECT ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_WRITE;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_WRITE' and auth.SELECTAUTH='Y' and GRANTEETYPE='R' + where tab.tabschema='${schemalist[$i]}' and auth.tabname is null and SUBSTR(tab.PROPERTY,19,1) <>'Y' and tab.tabname not in ($tables) and tab.type='V'" >>$GRANT_TEMP + + for tab in `cat $blacklist` + do + typeset -u table_name=$tab + db2 -x "select 'revoke SELECT ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_WRITE;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_WRITE' and SELECTAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + db2 -x "select 'revoke insert ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_WRITE;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_WRITE' and INSERTAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + db2 
-x "select 'revoke update ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_WRITE;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_WRITE' and DELETEAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + db2 -x "select 'revoke delete ON TABLE \"'|| rtrim(tabschema) ||'\".\"'|| rtrim(tabname) || '\" from role ${schemalist[$i]}_WRITE;' + from syscat.tabauth + where GRANTEE='${schemalist[$i]}_WRITE' and UPDATEAUTH='Y' and tabschema='${schemalist[$i]}' and tabname='$table_name'" >>$GRANT_TEMP + done + else + db2 -x "select 'GRANT SELECT,insert, update, delete ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_WRITE;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_WRITE' and auth.SELECTAUTH='Y' and auth.INSERTAUTH='Y' and auth.DELETEAUTH='Y' and auth.UPDATEAUTH ='Y' and GRANTEETYPE='R' + where tab.tabschema='${schemalist[$i]}' and tab.type<>'A' and auth.tabname is null and SUBSTR(tab.PROPERTY,19,1) <>'Y' and tab.type <> 'V'" >>$GRANT_TEMP + db2 -x "select 'GRANT SELECT ON TABLE \"'|| rtrim(tab.tabschema) ||'\".\"'|| rtrim(tab.tabname) || '\" TO role ${schemalist[$i]}_WRITE;' + from syscat.tables tab + left join syscat.tabauth auth on tab.TABSCHEMA=auth.TABSCHEMA and tab.TABNAME=auth.TABNAME + and GRANTEE='${schemalist[$i]}_WRITE' and auth.SELECTAUTH='Y' and GRANTEETYPE='R' + where tab.tabschema='${schemalist[$i]}' and auth.tabname is null and SUBSTR(tab.PROPERTY,19,1) <>'Y' and tab.type='V'" >>$GRANT_TEMP + fi + + ####db2 -x "select distinct 'grant role ${schemalist[$i]}_WRITE to user ' ||tab.GRANTEE||';' + ####from SYSCAT.TABAUTH tab + ####left join syscat.roleauth ro on tab.GRANTEE=ro.GRANTEE and ro.ROLENAME='${schemalist[$i]}_WRITE' + ####where ro.rolename is null and tab.GRANTEETYPE='U' and tab.GRANTEE <>'${schemalist[$i]}_WRITE' and 
tab.UPDATEAUTH='Y' and tab.TABSCHEMA='${schemalist[$i]}'" >>$GRANT_TEMP + + if [ -s $GRANT_TEMP ] + then + db2 -tcvf $GRANT_TEMP + rm $GRANT_TEMP + fi + i=`expr $i + 1` + done + fi +fi diff --git a/instance-applications/120-ibm-db2u-database/files/reorgTablesIndexesInplace2.sh b/instance-applications/120-ibm-db2u-database/files/reorgTablesIndexesInplace2.sh new file mode 100644 index 000000000..6f594ba73 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/reorgTablesIndexesInplace2.sh @@ -0,0 +1,1336 @@ +#!/bin/sh +## +## HPS created Jan 2019 +## +## http://www.ibm.com/developerworks/data/library/techarticle/dm-1307optimizerunstats/ +## see Identifying fragmented indexes from statistics +## http://www.ibm.com/developerworks/data/library/techarticle/dm-1307optimizerunstats/#Listing%207 +##0 23 * * 5 (. ~/sqllib/db2profile; ~/dba/bin/misc/reorgTablesIndexesInplace2.sh -s OMDB -tb_stats -if_stats -window 120 -tr >> ~/dba/logs/reorgTablesIndexesInplace2.sh.log 2>&1 ) +## script to reorg tables online and indexes offline based on different criteria +## we need to be able to perform an online table reorg and an offline indexes all reorg in the same run of the script +## + +UsageHelp() +{ + + echo "Script to perform reorg tables, indexes online (inplace) " + echo " also to REORG INDEXES ALL FOR TABLE offline" + echo " db2 performs the online reorgs asynchronously" + echo "" + echo "Usage: ${0} [options]" + echo " where [options] is one of the following:" + echo " -h: displays this usage screen" + echo " -db: dbname, default is all cataloged databases" + echo "" + echo " -s: table schemaname" + echo " -t: table(s) to reorg" + echo "-tb_stats: reorg tables reported by REORGCHK_TB_STATS" + echo " -ti: reorg table index(s), format must be TABSCHEMA.TABNAME.INDSCHEMA.INDNAME" + echo "-ix_stats: reorg table index(s) reported by REORGCHK_IX_STATS" + echo "-if_stats: reorg indexes all for table(s) offline as reported by index fragmentation NLEAF/SEQUENTIAL_PAGES 
columns" + echo "" + echo " -ls: list valid table sizes for a particular schema" + echo " -lf: list all fragmented index details for a particular schema, based on valid table sizes" + echo " -lt: list all tables to reorg based on REORGCHK_TB_STATS reorg column, based on valid table sizes" + echo " -li: list all indexes to reorg based on REORGCHK_IX_STATS reorg column, based on valid table sizes" + echo " -l: list tables/indexes that would be reorged" + echo "" + echo " -ittx: ignore tables over a specific threshold size in MBs, default is 20000 MB ie 20 GB" + echo " -ittn: ignore tables under a specific threshold size in MBs, default is 10 MB" + echo " -mar: maximum asynchronous reorgs allowed, default is 3" + echo " -log: don't kick off a reorg if transaction log usage is over a certain percentage, default is 90%" + echo " -window: stop reorg tables/indexes/runstats after a set maintenance timeout window, default is 240 minutes" + echo " -twa: timeout window action: default=2 for online, 1 for offline" + echo " 1=allow current reorg(s) to continue" + echo " 2=stop current reorg(s)" +# echo " 3=stop current reorg(s) if < 80% complete and continue script" + echo " -ignore: ignore specific tables from SYSIBMADM.ADMINTABINFO t0, SYSCAT.TABLES t1 " + echo " eg \"$IGNORE_TABLES_EX\"" + echo " -reorg: table F1 F2 F3 filter reorg, default is *" + echo " -sleep: SLEEP_INTERVAL_TIME, default is 60 seconds" + echo "" + echo " -tr: execute inplace table/index reorg" + echo "" + echo " -trsi: Retrieve table reorganization snapshot information from snap_get_tab_reorg and db2pd -reorgs index" + echo "" + echo "Examples:" + echo " 1. ${0} -h" + echo " 2. ${0} -db dbname -s omdb -ls" + echo " 3. ${0} -s OMDB -t \"YFS_ITEM YFS_TASK_Q YFS_SHIPMENT\" -tb_stats -tr " + echo " 4. ${0} -s OMDB -ti \"OMDB.YFS_SNAPSHOT.OMDB.YFS_SNAPSHOT_I1 OMDB.YFS_ITEM.OMDB.YFS_ITEM_PK\" -ix_stats -tr" + echo " 5. ${0} -s OMDB -t \"YFS_ITEM YFS_SNAPSHOT YFS_IMPORT YFS_EXPORT\" -if_stats -tr" + echo " 6. 
${0} -s OMDB -tb_stats -mar 5 -window 10 -log 95 -ittx 30000 -tr" + echo " 7. ${0} -s OMDB -tb_stats -mar 5 -window 10 -log 95 -ignore \"$IGNORE_TABLES_EX\" -reorg \"***\" -tr" + echo " 8. ${0} -s OMDB -tb_stats -if_stats -ittx 100 -ittn 20 -tr" + echo " 9. ${0} -trsi" + + echo "" + +} + +## +## function to check if a string is numeric +## +isNumeric() +{ + echo $1 | grep -E '^[0-9]+$' > /dev/null + + return $? + +} + + +TRSI() +{ + db2 -v "select varchar(tabschema,9) as tabschema, varchar(tabname,32) as tabname, + REORG_STATUS, REORG_COMPLETION, REORG_PHASE, REORG_CURRENT_COUNTER, REORG_MAX_COUNTER, +-- varchar( varchar_format(REORG_START, 'YYYY-MM-DD HH24:MI:SS'),19) as REORG_START, +-- varchar( varchar_format(REORG_END, 'YYYY-MM-DD HH24:MI:SS'),19) as REORG_END, + REORG_START, REORG_END, + REORG_INDEX_ID, REORG_TBSPC_ID + from table(snap_get_tab_reorg('')) + order by REORG_START asc + with ur" + + db2pd -db $DBNAME -reorgs index | sed -n "/Index Reorg Stats:/,//p" + + +} + +log() +{ + TYPE=$1 + MSG="$2" + + DATE=$( date '+%d-%m-%Y %H:%M:%S' ); + + # TYPE: + # 0 = Critical + # 1 = Warn + # 3 = Info + # 5 = Debug' + if [ ${TYPE} -eq 0 ]; then + TYPEMSG="Error" + elif [ ${TYPE} -eq 1 ]; then + TYPEMSG="Warning" + elif [ ${TYPE} -eq 3 ]; then + TYPEMSG="Info" + elif [ ${TYPE} -eq 5 ]; then + TYPEMSG="Debug" + else + TYPEMSG="Other" + fi + + echo -e "${DATE} ${TYPEMSG}: ${MSG}" | tee -a $REORG_TABLE_INDEX_LOG + + return 0 +} + +initTABLE_IN_USE_ARRAY() +{ + + local NUM_ITEMS=$1 + local jj; + + ## + ## initialise the db2 TABLE_IN_USE_ARRAY + ## + for((jj=0; jj<$NUM_ITEMS; jj++)) + do + TABLE_IN_USE_ARRAY[$jj]="" + done + + return 0 + +} + +existTABLE_TABLE_IN_USE_ARRAY() +{ + + local TABLE=$1 + local jj; + ## + ## check if table is in use + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + if [ "${TABLE_IN_USE_ARRAY[$jj]}" == "$TABLE" ]; then + return 0; + fi + done + + return 1; + +} + +addTABLE_TABLE_IN_USE_ARRAY() +{ + + local TABLE=$1 + local jj; + + ## 
+ ## add table in empty slot + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + if [ "${TABLE_IN_USE_ARRAY[$jj]}" == "" ]; then + TABLE_IN_USE_ARRAY[$jj]=$TABLE; + return 0; + fi + done + + return 1; +} + +removeTABLE_TABLE_IN_USE_ARRAY() +{ + + local TABLE=$1 + local jj; + ## + ## remove entry + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + if [ "${TABLE_IN_USE_ARRAY[$jj]}" == "$TABLE" ]; then + TABLE_IN_USE_ARRAY[$jj]=""; + return 0; + fi + done + + return 1; + +} + + +listTABLE_IN_USE_ARRAY() +{ + + local jj; + ## + ## list table entries + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + log 5 "TABLE_IN_USE_ARRAY $jj ${TABLE_IN_USE_ARRAY[$jj]}"; + done + + return 0; + +} + +getValidTablesToReorg() +{ + + getValidTableSizes + + VALID_TABLES_TO_REORG="" + VALID_TABLES_TO_REORG_RAW="" + NUM_VALID_TABLES_TO_REORG=0 + for TABNAME in $VALID_TABLES + do + + RAW=$( db2 -x "call REORGCHK_TB_STATS('T','$SCHEMANAME_IN.$TABNAME')" ); + RAW=$( echo "$RAW" | grep $SCHEMANAME_IN | grep $TABNAME | awk '{ if (NF == 12) print $0 }' | sed 's/ \+/ /g' | grep $REORG ) + rc=$? 
+ if [ $rc -eq 0 ]; then + TABNAME=$( echo "$RAW" | awk '{print $2}' ); + [ "$VALID_TABLES_TO_REORG_RAW_DATA" == "" ] && VALID_TABLES_TO_REORG_RAW_DATA="$RAW" || VALID_TABLES_TO_REORG_RAW_DATA="$VALID_TABLES_TO_REORG_RAW_DATA\n$RAW" + fi + + done + + ## sort the tables based on REORG column + VALID_TABLES_TO_REORG_RAW_DATA=$( echo -e "$VALID_TABLES_TO_REORG_RAW_DATA" | sort -k12 -r); + VALID_TABLES_TO_REORG=$( echo -e "$VALID_TABLES_TO_REORG_RAW_DATA" | awk '{print $2}' ); + NUM_VALID_TABLES_TO_REORG=$( echo -e "$VALID_TABLES_TO_REORG" | wc -l ); + + return 0 + +} + +getValidIndexesToReorg() +{ + + getValidTableSizes + + VALID_INDEXES_TO_REORG="" + VALID_INDEXES_TO_REORG_RAW="" + NUM_VALID_INDEXES_TO_REORG=0 + for TABNAME in $VALID_TABLES + do + + RAW=$( db2 -x "call REORGCHK_IX_STATS('T','$SCHEMANAME_IN.$TABNAME')" ); + ## this can return multiple indexes for same TABNAME + RAW=$( echo "$RAW" | grep $SCHEMANAME_IN | grep $TABNAME | awk '{ if (NF == 21) print $0 }' | sed 's/ \+/ /g' | grep $REORG ); + rc=$? 
+ if [ $rc -eq 0 ]; then + INDNAME=$( echo "$RAW" | awk '{print $1"."$2"."$3"."$4}' ); + [ "$VALID_INDEXES_TO_REORG_RAW_DATA" == "" ] && VALID_INDEXES_TO_REORG_RAW_DATA=$RAW || VALID_INDEXES_TO_REORG_RAW_DATA="$VALID_INDEXES_TO_REORG_RAW_DATA\n$RAW" + fi + + done + + ## sort the indexes based on REORG column + VALID_INDEXES_TO_REORG_RAW_DATA=$( echo -e "$VALID_INDEXES_TO_REORG_RAW_DATA" | sort -k21 -r); + VALID_INDEXES_TO_REORG=$( echo -e "$VALID_INDEXES_TO_REORG_RAW_DATA" | awk '{print $1"."$2"."$3"."$4}' ); + NUM_VALID_INDEXES_TO_REORG=$( echo -e "$VALID_INDEXES_TO_REORG" | wc -l ); + + return 0 + +} + +getValidFragmentedIndexes() +{ + + ## + ## http://www.ibm.com/developerworks/data/library/techarticle/dm-1307optimizerunstats/#Listing%207 + ## + + getValidTableSizes + + VALID_FRAGMENTED_INDEXES_RAW_DATA=$( db2 -x "select rtrim(tabschema)||' '||rtrim(tabname)||' '||rtrim(indschema)||' '||rtrim(indname) + ||' '||indcard||' '||stats_time||' '||lastused||' '||nleaf||' '||sequential_pages + from syscat.indexes where tabschema='$SCHEMANAME_IN' + and not (nleaf = 1 and sequential_pages = 0) + and not (nleaf = 0 and sequential_pages = 1) + and (nleaf - sequential_pages > 10) + and tabname in ( $VALID_TABLES_FORMATTED ) + order by tabname + with ur"; ); + + VALID_FRAGMENTED_INDEXES_RAW_DATA=$(echo "${VALID_FRAGMENTED_INDEXES_RAW_DATA}" | sed 's/ *$//g' ); + VALID_FRAGMENTED_INDEXES=$( echo "${VALID_FRAGMENTED_INDEXES_RAW_DATA}" | sed 's/ *$//g' | cut -d' ' -f2 | uniq ) + NUM_VALID_FRAGMENTED_INDEXES=$( echo "${VALID_FRAGMENTED_INDEXES}" | wc -l ) + + +} + +getValidTableSizes() +{ + + VALID_TABLE_SIZES_RAW_DATA=$( db2 "select t0.tabname, + ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) as TOTAL_TABLE_MB, + cast ((INDEX_OBJECT_P_SIZE / 1024) as integer) as INDEX_SIZE_MB + from SYSIBMADM.ADMINTABINFO t0, SYSCAT.TABLES t1 + where t0.tabschema='$SCHEMANAME_IN' + and t0.tabschema=t1.tabschema + and 
t0.tabname=t1.tabname + $IGNORE_TABLES + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) < $IGNORE_TABLE_SIZE_THRESHOLD_MAX + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) > $IGNORE_TABLE_SIZE_THRESHOLD_MIN + order by 2 desc + with ur"; ); + rc=$? + if [ $rc -eq 0 ]; then + VALID_TABLE_SIZES=$( echo "$VALID_TABLE_SIZES_RAW_DATA" | sed '1,3d' | sed '$d' | sed '$d' ); + VALID_TABLE_SIZES=$( echo "$VALID_TABLE_SIZES" | awk 'BEGIN {ORS="\t"} { for(ii=1 ; ii<=NF ; ii++) print $ii; printf "\n"; }'); + VALID_TABLES=$( echo "$VALID_TABLE_SIZES" | awk '{print $1}' ); + NUM_VALID_TABLES=$( echo "$VALID_TABLE_SIZES" | wc -l ); + # log 3 "NUM_VALID_TABLES=$NUM_VALID_TABLES" + + VALID_TABLES_FORMATTED="" + for TABLE in $VALID_TABLES + do + VALID_TABLES_FORMATTED="$VALID_TABLES_FORMATTED'$TABLE'," + done + VALID_TABLES_FORMATTED=$( echo "$VALID_TABLES_FORMATTED" | sed 's/,$//g' ) + else + VALID_TABLES_FORMATTED="'UNKNOWN_TABNAME'" + fi + +} + +## is TABLE within size limits < IGNORE_TABLE_SIZE_THRESHOLD_MAX and > IGNORE_TABLE_SIZE_THRESHOLD_MIN +isTableWithinSizeLimit() +{ + local SCHEMANAME=$1 + local TABNAME=$2 + local RC="" + local rc=0 + + RC=$( db2 -x "select tabname, + ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) as TOTAL_TABLE_MB + from SYSIBMADM.ADMINTABINFO + where tabschema='$SCHEMANAME' + and tabname = '$TABNAME' + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) < $IGNORE_TABLE_SIZE_THRESHOLD_MAX + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) > $IGNORE_TABLE_SIZE_THRESHOLD_MIN + order by 2 desc + with ur" ); + + rc=$? 
+ return $rc
+
+}
+
+## create a list/array of table objects to reorg based on tabnames
+createTableOBJECT_ARRAY()
+{
+ local TABNAMES="$1"
+ local OBJECT_REORG_TABLE_TYPE=$2
+
+ ##
+ ## make the OBJECT_ARRAY for tables and indexes
+ ##
+ if [ -z "$OBJECT_ARRAY" ]; then
+ local let index=0;
+ else
+ local let index=${#OBJECT_ARRAY[@]};
+ fi
+ local TID=0 ## Table ID - always 0 for online table reorg
+ local INDSCHEMA=NULL;
+ local INDNAME=NULL;
+ local LOCK_COUNT=0
+
+ for TABNAME in $TABNAMES
+ do
+ ## we need TABLEID, TBSPACEID for IF_STATS as the full TableName: may not be displayed in the db2pd output
+ local RC=$( db2 -x "select TABLEID, TBSPACEID from syscat.tables where tabname='$TABNAME' and tabschema='$SCHEMANAME_IN'" )
+ local rc=$?
+ if [ $rc -eq 0 ]; then
+ local TABLEID=$( echo $RC | awk '{print $1}' );
+ local TBSPACEID=$( echo $RC | awk '{print $2}' );
+ OBJECT_ARRAY[$index]="$SCHEMANAME_IN#$TABNAME#$INDSCHEMA#$INDNAME#$TID#NOTSTARTED#2019-01-01-00.00.00#2019-01-01-00.00.00#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"
+ let index+=1
+ fi
+ done
+
+}
+
+## create a list/array of table objects to reorg based on indnames
+createIndexOBJECT_ARRAY()
+{
+
+ local INDNAMES="$1"
+ local OBJECT_REORG_TABLE_TYPE=$2
+ local let index=0
+ local TABLEID=9999;
+ local TBSPACEID=9999;
+ local LOCK_COUNT=0;
+
+ for INDEX in $INDNAMES
+ do
+ local TABSCHEMA=$( echo $INDEX | cut -d. -f1);
+ local TABNAME=$( echo $INDEX | cut -d. -f2);
+ local INDSCHEMA=$( echo $INDEX | cut -d. -f3);
+ local INDNAME=$( echo $INDEX | cut -d. -f4);
+ local RC=$( db2 -x "select IID from syscat.indexes where tabschema = '$TABSCHEMA' and tabname = '$TABNAME' and indschema = '$INDSCHEMA' and indname = '$INDNAME'");
+ local rc=$?
+ if [ $rc -eq 0 ]; then + local IID=$( echo $RC | cut -d' ' -f1); + OBJECT_ARRAY[$index]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#NOTSTARTED#2019-01-01-00.00.00#2019-01-01-00.00.00#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE" + let index+=1 + fi + done + +} + +## +## list out the objects and state +## this can be used for debugging +## +listOBJECT_ARRAY() +{ + + local ii; + log 3 "The following is for debug purposes, Num objects=${#OBJECT_ARRAY[@]}, $OBJECT_NUM_TB_STATS:$OBJECT_NUM_IX_STATS:$OBJECT_NUM_IF_STATS" + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + echo "${OBJECT_ARRAY[$ii]}" | tee -a $REORG_TABLE_INDEX_DEBUG + done + +} + +## get the number tb_stats, ix_stats and if_stats objects +getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY() +{ + + local rc=0; + local ii; + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + if [ $( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f12 ) -eq $1 ]; then + let rc+=1; + fi + done + + return $rc; + +} + +## +## the main event +## +reorgTables() +{ + + ## variables that need to be reset on each run of the function + NUM_REORGS_IN_PROGRESS=0; + NUM_REORGS_KICKED_OFF=0; + NUM_REORGS_COMPLETED=0; + NUM_REORGS_STOPPED=0; + NUM_REORGS_ABORTED=0; + + while true + do + + ## check reorg window maintenance time + MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS=$( date '+%s' ); + DIFF=$(( MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS - REORG_TIMEOUT_WINDOW_START_TIME_SECONDS )); + + ## safety valve - if for some reason the logic can't stop the reorgs + if [ $DIFF -ge $(( REORG_TIMEOUT_WINDOW_SECONDS + REORG_TIMEOUT_OVERFLOW_VALVE )) ]; then + log 1 "REORG_TIMEOUT_OVERFLOW_VALVE detected"; + log 1 "Aborting reorgs" + break; + fi + + if [ $DIFF -ge $REORG_TIMEOUT_WINDOW_SECONDS ]; then + + REORG_TIMEOUT_WINDOW_COMPLETED=1; + + ## -twa: timeout window action: default=3 + ## 1=allow current reorg(s) to continue + ## 2=stop current reorg(s) + ## 3=stop current reorg(s) if < 80% complete + if [ $REORG_TIMEOUT_WINDOW_ACTION -eq 1 ]; then 
+ + log 3 "Reorg window ending, reorg window time exceeded, twa=$REORG_TIMEOUT_WINDOW_ACTION" + break + + elif [ $REORG_TIMEOUT_WINDOW_ACTION -eq 2 ]; then + + ## use of the REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER + ## 0 = ABORT those not started and issue a STOP to those STARTED + ## 1 = loop again and see if script exits as all reorgs are COMPLETED and STOPPED and ABORTED + ## 2 = break out + if [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 0 ]; then + let REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER+=1 + log 3 "Reorg window ending, reorg window time exceeded, twa=$REORG_TIMEOUT_WINDOW_ACTION" + log 3 "Aborting reorgs NOTSTARTED and issuing a STOP to those that are STARTED" + elif [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 1 ]; then + let REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER+=1 + log 3 "REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER=$REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER" + elif [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 2 ]; then + log 3 "REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER=$REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER" + log 3 "breaking out of reorg loop" + break; + fi + + fi + + fi + + ## loop for all OBJECTS - extract relevant data from OBJECT array + ## if we have NOTSTARTED in the OBJECT_ARRAY[N] - then kick off a reorg + ## then check tables reorg status + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + ## get table related info + TABSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f1 ); + TABNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f2 ); + INDSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f3 ); + INDNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f4 ); + IID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f5 ); + OBJECT_REORG_STATUS=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f6 ); + OBJECT_REORG_START=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f7 ); + OBJECT_REORG_END=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f8 ); + TABLEID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f9 ); + TBSPACEID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f10 ); + LOCK_COUNT=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# 
-f11 ); + OBJECT_REORG_TABLE_TYPE=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f12 ); + isTable=0; + isIndex=0; + if [ "$INDSCHEMA" == "NULL" -a "$INDNAME" == "NULL" ]; then + isTable=1; + else + isIndex=1; + fi + + if [ $OBJECT_REORG_TABLE_TYPE -ne $REORG_TABLE_TYPE ]; then + continue; + fi + + # log 5 "${OBJECT_ARRAY[$ii]}" + + ## + ## has OBJECT COMPLETED - no need to continue here + ## + if [ "$OBJECT_REORG_STATUS" == "COMPLETED" -o "$OBJECT_REORG_STATUS" == "STOPPED" -o "$OBJECT_REORG_STATUS" == "ABORTED" ]; then + continue; + fi + + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + + ## + ## query db2 for REORG_STATUS etc - there may not be an entry so carry on + ## + RC_SNAP=$( db2 -x "select REORG_STATUS, REORG_COMPLETION, REORG_PHASE, REORG_CURRENT_COUNTER, REORG_MAX_COUNTER, REORG_START, REORG_END, REORG_INDEX_ID, REORG_TBSPC_ID from table(snap_get_tab_reorg('')) where tabschema='$TABSCHEMA' and tabname='$TABNAME' and REORG_START > TIMESTAMP('$WINDOW_START_TIME_DB2') and REORG_INDEX_ID=$IID"); + rc=$? + if [ $rc -ge 2 ]; then + log 0 "Possible error running select query against db2\nrc=$rc\nRC=$RC" + continue; + fi + + if [ $rc -eq 0 ]; then + RC=$( echo "$RC_SNAP" | awk 'BEGIN {ORS="\t"} { for(ii=1 ; ii<=NF ; ii++) print $ii; }') + REORG_STATUS=$( echo "$RC_SNAP" | awk '{print $1}' ); + REORG_COMPLETION=$( echo "$RC_SNAP" | awk '{print $2}' ); + REORG_CURRENT_COUNTER=$( echo "$RC_SNAP" | awk '{print $4}' ); + REORG_MAX_COUNTER=$( echo "$RC_SNAP" | awk '{print $5}' ); + REORG_START=$( echo "$RC_SNAP" | awk '{print $6}' ); + REORG_END=$( echo "$RC_SNAP" | awk '{print $7}' ); + REORG_INDEX_ID=$( echo "$RC_SNAP" | awk '{print $8}' ); + + REORG_PERCENT_COMPLETE=0 + if [ ! -z "$REORG_CURRENT_COUNTER" -a $REORG_CURRENT_COUNTER -gt 0 ]; then + if [ ! 
-z "$REORG_MAX_COUNTER" -a $REORG_MAX_COUNTER -gt 0 ]; then + if [ $REORG_MAX_COUNTER -ge $REORG_CURRENT_COUNTER ]; then + REORG_PERCENT_COMPLETE=$( echo $REORG_CURRENT_COUNTER $REORG_MAX_COUNTER | awk '{ print int (($1/$2)*100) }' ); + fi + fi + fi + fi + + # log 5 "RC_SNAP=$RC_SNAP" + + elif [ $IF_STATS -eq 3 ]; then + + DB2PD_REORG_INDEX_RECORD=$( db2pd -db $DBNAME -reorgs index | grep -B1 -A11 -w "^TbspaceID: $TBSPACEID" | grep -B1 -A11 -w "TableID: $TABLEID" ); + rc=$? + if [ $rc -eq 0 ]; then + REORG_START=$(echo "$DB2PD_REORG_INDEX_RECORD" | grep '^Start Time:' | awk '{ print $3,$4}' ); + REORG_START_SECONDS=$(date --d="$REORG_START" '+%s'); + + if [ $REORG_START_SECONDS -ge $IF_STATS_WINDOW_START_TIME_DB2 ]; then + REORG_STATUS=$(echo "$DB2PD_REORG_INDEX_RECORD" | grep '^Status:' | awk '{ $1=""; print $0}' | sed 's/^[ \t]*//;s/[ \t]*$//' ); + ## some differences between snap_get_tab_reorg and db2pd -reogrs index output + if [ "$REORG_STATUS" == "In Progress" ]; then + REORG_STATUS="STARTED"; + elif [ "$REORG_STATUS" == "Completed" ]; then + REORG_STATUS="COMPLETED"; + elif [ "$REORG_STATUS" == "Stopped" ]; then + REORG_STATUS="STOPPED"; + fi + + REORG_END=$(echo "$DB2PD_REORG_INDEX_RECORD" | grep '^Start Time:' | grep 'End Time:' | awk '{ print $7,$8}' ); + fi + + fi + + + fi + + + ## + ## has OBJECT been KICKED_OFF or STARTED + ## if it has check to see if is STARTED or COMPLETED + ## update OBJECT_ARRAY + ## update COMPLETION/STOPPED stats + ## + if [ "$OBJECT_REORG_STATUS" == "KICKED_OFF" ] || [ "$OBJECT_REORG_STATUS" == "STARTED" ]; then + if [ ! 
-z "$REORG_STATUS" ]; then + if [ "$REORG_STATUS" == "STARTED" -o "$REORG_STATUS" == "COMPLETED" -o "$REORG_STATUS" == "STOPPED" ]; then + OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$REORG_STATUS#$REORG_START#$REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE" + fi + + if [ "$REORG_STATUS" == "COMPLETED" -o "$REORG_STATUS" == "STOPPED" ]; then + + removeTABLE_TABLE_IN_USE_ARRAY "$TABSCHEMA.$TABNAME" + rc=$? + if [ $rc -eq 1 ]; then + log 1 "Failed to remove table $TABSCHEMA.$TABNAME from TABLE_IN_USE_ARRAY"; + fi + + if [ "$REORG_STATUS" == "COMPLETED" ]; then + let NUM_REORGS_COMPLETED+=1 + elif [ "$REORG_STATUS" == "STOPPED" ]; then + let NUM_REORGS_STOPPED+=1 + fi + + let NUM_REORGS_IN_PROGRESS-=1 + fi + + fi + + ## + ## OBJECT is NOSTARTED so KICK_OFF a reorg + ## + elif [ "$OBJECT_REORG_STATUS" == "NOTSTARTED" ]; then + + ## dont kick off any reorgs if window timeout passed and TWA=2 + ## OBJECTS become ABORTED - UPDATE OBJECT_ARRAY + if [ $REORG_TIMEOUT_WINDOW_COMPLETED -eq 1 ] && [ $REORG_TIMEOUT_WINDOW_ACTION -eq 2 ] && [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 1 ]; then + REORG_STATUS=ABORTED; + OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$REORG_STATUS#$OBJECT_REORG_START#$OBJECT_REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"; + let NUM_REORGS_ABORTED+=1; + continue; + + fi + + ## is the TABLE already being used -if it is goto next OBJECT + existTABLE_TABLE_IN_USE_ARRAY "$TABSCHEMA.$TABNAME" + rc=$? 
+ [ $rc -eq 0 ] && continue; + + ## we only want to kick off so many reorgs at any one time + if [ $NUM_REORGS_IN_PROGRESS -eq $MAX_ASYNC_REORGS_ALLOWED ]; then + continue; + fi + + ## + ## The table could already be locked - if it is then by-pass it + ## and ABORT if locked more than 10 times + ## + TABLE_LOCKED=$( db2 "select APPLICATION_HANDLE, LOCK_OBJECT_TYPE, LOCK_MODE, LOCK_CURRENT_MODE, LOCK_STATUS, LOCK_COUNT, LOCK_HOLD_COUNT, TBSP_ID, TAB_FILE_ID from TABLE (MON_GET_LOCKS(NULL, -2)) where TBSP_ID=$TBSPACEID and TAB_FILE_ID=$TABLEID and LOCK_OBJECT_TYPE='TABLE' and LOCK_MODE='IX' with ur"; ); + rc=$? + if [ $rc -eq 0 ]; then + let LOCK_COUNT+=1; + log 1 "Appears table $TABSCHEMA.$TABNAME is already locked by another application(s), LOCK_COUNT=$LOCK_COUNT"; + log 1 "$TABLE_LOCKED"; + if [ $LOCK_COUNT -gt 10 ]; then + OBJECT_REORG_STATUS=ABORTED; + let NUM_REORGS_ABORTED+=1; + log 1 "Aborting table $TABSCHEMA.$TABNAME , LOCK_COUNT=$LOCK_COUNT"; + fi +OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$OBJECT_REORG_STATUS#$OBJECT_REORG_START#$OBJECT_REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"; + continue; + fi + + ## + ## do a check to see where we are on transaction log space - this could be improved!!!! + ## + LOG_USED=$( db2 "select cast(LOG_UTILIZATION_PERCENT as decimal(5,2)) as PCTUSED, cast((TOTAL_LOG_USED_KB/1024) as Integer) as TOTUSEDMB, cast((TOTAL_LOG_AVAILABLE_KB/1024) as Integer) as TOTAVAILMB, cast((TOTAL_LOG_USED_TOP_KB/1024) as Integer) as TOTUSEDTOPMB FROM SYSIBMADM.LOG_UTILIZATION "); + if [ ! -z "$LOG_USED" ]; then + PCTUSED=$( echo "$LOG_USED" | awk '{ if(NF==4 && $2 ~/^[0-9]+$/) print int($1)}' ); + if [ ! 
-z "$PCTUSED" ]; then + if [ $PCTUSED -gt $TRANSACTION_LOG_THRESHOLD_PCT ]; then + log 1 "Will not kick off another reorg due to logfile PCTUSED above threshold of $LOG_THRESHOLD\n$LOG_USED" + continue + else + log 3 "$LOG_USED" + fi + fi + fi + + ## + ## kick off another reorg + ## if rc=0 then ok, else we ABORT the OBJECT and don't try again + ## + log 3 "" + + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + + if [ $isTable -eq 1 ]; then + + db2 -v "reorg table $TABSCHEMA.$TABNAME inplace allow write access" + rc=$? + + elif [ $isIndex -eq 1 ]; then + db2 -v "reorg table $TABSCHEMA.$TABNAME index $INDSCHEMA.$INDNAME inplace allow write access" + rc=$? + fi + + elif [ $IF_STATS -eq 3 ]; then + + ## for offline we throw a job at db2 and wait a few seconds and check the output + ## output could be "reorg indexes all for table ...", + ## or SQL error + ## or 'DB20000I The REORG command completed successfully.' + ## not sure if there is a better way to do this + TMPLOG="/tmp/$TABSCHEMA.$TABNAME.tmp"; + db2 -v "reorg indexes all for table $TABSCHEMA.$TABNAME allow write access" > $TMPLOG 2>&1 & + sleep 5; + cat $TMPLOG; + RC=$( grep '^SQL' $TMPLOG); + rc=$? + if [ $rc -eq 0 ]; then + log 1 "Failed to kick off reorg\n$RC"; + rc=1; + else + ## reorg could have finished then no need for the big sleep + RC=$( grep 'DB20000I The REORG command completed successfully.' $TMPLOG); + if [ $? -eq 0 ]; then + IF_STATS_BYPASS_SLEEP_INTERVAL_TIME=1; + fi + rc=0; + + fi + rm -f $TMPLOG; + + fi + + if [ $rc -eq 0 ]; then + REORG_STATUS=KICKED_OFF; + else + REORG_STATUS=ABORTED; + fi + OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$REORG_STATUS#$OBJECT_REORG_START#$OBJECT_REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"; + if [ $rc -ne 0 ]; then + let NUM_REORGS_ABORTED+=1; + continue + fi + + ## add the table to the TABLE_IN_USE_ARRAY + addTABLE_TABLE_IN_USE_ARRAY "$TABSCHEMA.$TABNAME" + rc=$? 
+ if [ $rc -eq 1 ]; then + log 1 "Failed to add table $TABSCHEMA.$TABNAME to TABLE_IN_USE_ARRAY"; + fi + let NUM_REORGS_KICKED_OFF+=1; + let NUM_REORGS_IN_PROGRESS+=1; + + continue; + + fi + + ## + ## ouput STATUS + ## + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + log 3 "$NUM_REORGS_IN_PROGRESS:$NUM_REORGS_KICKED_OFF:$NUM_REORGS_ABORTED:$NUM_REORGS_STOPPED:$NUM_REORGS_COMPLETED:$NUM_REORG_OBJECTS $TABSCHEMA.$TABNAME : $RC : $REORG_PERCENT_COMPLETE %" | tee -a $REORG_TABLE_INDEX_DEBUG + elif [ $IF_STATS -eq 3 ]; then + log 3 "$NUM_REORGS_IN_PROGRESS:$NUM_REORGS_KICKED_OFF:$NUM_REORGS_ABORTED:$NUM_REORGS_STOPPED:$NUM_REORGS_COMPLETED:$NUM_REORG_OBJECTS $TABSCHEMA.$TABNAME \n$DB2PD_REORG_INDEX_RECORD "| tee -a $REORG_TABLE_INDEX_DEBUG + + fi + + ## + ## if reorg timeout then issue a stop to current reorgs that are STARTED + ## no error checking for stopping a reorg + ## no need to update OBJECT array as it will be updated on next loop + ## + + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + + if [ "$REORG_STATUS" == "STARTED" ] && [ $REORG_TIMEOUT_WINDOW_COMPLETED -eq 1 ] && [ $REORG_TIMEOUT_WINDOW_ACTION -eq 2 ] && [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 1 ]; then + + + if [ $isTable -eq 1 ]; then + db2 -v "reorg table $TABSCHEMA.$TABNAME inplace stop" + rc=$? + elif [ $isIndex -eq 1 ]; then + db2 -v "reorg table $TABSCHEMA.$TABNAME index $INDSCHEMA.$INDNAME inplace stop" + rc=$? 
+ fi + + fi + + fi + + done ## for OBJECT_ARRAY[@] + + ## check if we are done with all OBJECTS + if [ $((NUM_REORGS_COMPLETED + NUM_REORGS_STOPPED + NUM_REORGS_ABORTED)) -ge $NUM_REORG_OBJECTS ]; then + log 3 "All reorgs are completed, stopped or aborted, $NUM_REORGS_COMPLETED:$NUM_REORGS_STOPPED:$NUM_REORGS_ABORTED:$NUM_REORG_OBJECTS" + break + fi + + ## wait some time + if [ $IF_STATS -eq 3 ] && [ $IF_STATS_BYPASS_SLEEP_INTERVAL_TIME -eq 1 ]; then + sleep 1; + IF_STATS_BYPASS_SLEEP_INTERVAL_TIME=0; + else + sleep $SLEEP_INTERVAL_TIME + fi + + done ## while true + +} + + +## init +if [ -f ${HOME}/sqllib/db2profile ]; then + . ${HOME}/sqllib/db2profile +fi + +## script already running ? +if [ $( ps -ef | grep $0 | grep -v grep | wc -l ) -gt 2 ]; then + echo "Warning: appears $0 already running" + echo "$( ps -ef | grep $0 | grep -v grep )"; + exit 1 +fi + +SCRIPT=$(basename $0) +SCRIPT_DIR=$(dirname $0) +WHOAMI=$(whoami) +HOSTNAME=$(hostname) + +## setup some temp work files +LOGDATE=$(date '+%Y%m%d'); +REORG_TABLE_INDEX_LOG=/tmp/${SCRIPT}.tmp.123.log +rm -f $REORG_TABLE_INDEX_LOG +REORG_TABLE_INDEX_DEBUG=/tmp/${SCRIPT}.debug +rm -f $REORG_TABLE_INDEX_DEBUG + +## control variables +LIST_ONLY=0 +LIST_FRAGMENTED_INDEXES=0 +LIST_VALID_TABLE_SIZES=0 +LIST_REORGCHK_TB_STATS_TABLES=0 +LIST_REORGCHK_IX_STATS_TABLES=0 +EXECUTE_TABLE_REORG=0 +IGNORE_TABLE_SIZE_THRESHOLD_MAX=20000; +IGNORE_TABLE_SIZE_THRESHOLD_MIN=10; +MAINTENANCE_TIMEOUT_WINDOW_MINUTES=240; +REORG_TIMEOUT_WINDOW_ACTION=2; +MAX_ASYNC_REORGS_ALLOWED=3; +TRANSACTION_LOG_THRESHOLD_PCT=90; +TB_STATS=0; +IF_STATS=0; +IX_STATS=0; +TRSI=0 +IGNORE_TABLES_EX=" and t0.tabname not like '%\_H' escape '\' and t1.volatile != 'C' " +IGNORE_TABLES=""; +REORG="*"; +SLEEP_INTERVAL_TIME=60; +REORGCHK_TB_IF_STATS_OPTION=""; +REORGCHK_TB_STATS=1; +REORGCHK_IF_STATS=3; + +## user check +if [ $WHOAMI == "root" ]; then + log 0 " This script should be not run as '$WHOAMI', but as instance owner." 
+ exit 1 +fi + +## +## command line arguments +## +while [ $# -gt 0 ] +do + case $1 in + -h|-H|-help|--help) UsageHelp; exit 1 ;; + + -db) shift; [ ! -z $1 ] && DB=$( echo $1 | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;; + -s) shift; [ ! -z $1 ] && SCHEMANAME_IN=$( echo $1 | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;; + -t) shift; [ ! -z "$1" ] && TABLE_IN=$( echo "$1" | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;; + -tb_stats) REORGCHK_TB_IF_STATS_OPTION+=$REORGCHK_TB_STATS; TB_STATS=1 ;; + -ti) shift; [ ! -z "$1" ] && INDEX_IN=$( echo "$1" | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;; + -ix_stats) IX_STATS=2 ;; + -if_stats) REORGCHK_TB_IF_STATS_OPTION+=$REORGCHK_IF_STATS; IF_STATS=3 ;; + -ittx) shift; isNumeric $1 && { IGNORE_TABLE_SIZE_THRESHOLD_MAX=$1; } || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + -ittn) shift; isNumeric $1 && { IGNORE_TABLE_SIZE_THRESHOLD_MIN=$1; } || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + + -l) LIST_ONLY=1 ;; + -lf) LIST_FRAGMENTED_INDEXES=1 ;; + -ls) LIST_VALID_TABLE_SIZES=1 ;; + -lt) LIST_REORGCHK_TB_STATS_TABLES=1 ;; + -li) LIST_REORGCHK_IX_STATS_TABLES=1 ;; + + -window) shift; isNumeric $1 && { MAINTENANCE_TIMEOUT_WINDOW_MINUTES=$1; } || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + -twa) shift; isNumeric $1 && { REORG_TIMEOUT_WINDOW_ACTION=$1; } || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + -mar) shift; isNumeric $1 && { MAX_ASYNC_REORGS_ALLOWED=$1; } || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + -log) shift; isNumeric $1 && { TRANSACTION_LOG_THRESHOLD_PCT=$1; 
} || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + -tr) EXECUTE_TABLE_REORG=1 ;; + + -trsi) TRSI=1 ;; + -reorg) shift; [ ! -z "$1" ] && REORG=$( echo "$1" ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;; + + -ignore) shift; [ ! -z "$1" ] && { IGNORE_TABLES="$1"; } || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;; + + -sleep) shift; isNumeric $1 && { SLEEP_INTERVAL_TIME=$1; } || { echo "Error: Must enter an numeric argument for this option"; UsageHelp; exit 1 ; } ;; + + (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;; + (*) break;; + esac + + shift + +done + +## +## some verification +## +if [ -z "$SCHEMANAME_IN" ]; then + log 0 "must enter a schemaname" + exit 1 +fi + +CHECK=1 +if [ $CHECK -eq 0 ]; then + rc=0 + if [ $TB_STATS -eq 1 ] && [ $IX_STATS -eq 2 -o $IF_STATS -eq 3 ]; then + rc=1; + elif [ $IX_STATS -eq 2 ] && [ $TB_STATS -eq 1 -o $IF_STATS -eq 3 ]; then + rc=1; + elif [ $IF_STATS -eq 3 ] && [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + rc=1; + fi + if [ $rc -eq 1 ]; then + log 0 "can't define more than one of -tb_stats, -ix_stats or -if_stats" + exit 1 + fi + +fi ## CHECK + +if [ $TB_STATS -eq 1 ] && [ ! -z "$INDEX_IN" ]; then + log 0 "can't define -ti with -tb_stats" + exit 1 +elif [ $IX_STATS -eq 2 ] && [ ! -z "$TABLE_IN" ]; then + log 0 "can't define -t with -ix_stats" + exit 1 +elif [ $IF_STATS -eq 3 ] && [ ! 
-z "$INDEX_IN" ]; then + log 0 "can't define -ti with -if_stats" + exit 1 +fi + +if [ $TRANSACTION_LOG_THRESHOLD_PCT -gt 99 ]; then + log 0 "-log option should be less than 100, TRANSACTION_LOG_THRESHOLD_PCT=$TRANSACTION_LOG_THRESHOLD_PCT" + exit 1 +fi +if [ $IGNORE_TABLE_SIZE_THRESHOLD_MIN -ge $IGNORE_TABLE_SIZE_THRESHOLD_MAX ]; then + log 0 "option -ittx should be greater than option -ittn" + exit 1 +fi + +## override some defaults for offline reorgs +#if [ $IF_STATS -eq 3 ]; then +# MAX_ASYNC_REORGS_ALLOWED=1; +# REORG_TIMEOUT_WINDOW_ACTION=1; +# ## make SLEEP_INTERVAL_TIME 1/3 for IF_STATS +# SLEEP_INTERVAL_TIME=$( echo $SLEEP_INTERVAL_TIME | awk '{ print $1/3 }'); +#fi + +## fixup REORG filter string for grep +REORG=$( echo "$REORG" | sed 's/*/\\*/g' | sed 's/-/\\-/g'); + +## need to transform to seconds - easier to work with +## 3/4 time is for reorgs , 1/4 for runstats +MAINTENANCE_TIMEOUT_WINDOW_SECONDS=$(( 60 * MAINTENANCE_TIMEOUT_WINDOW_MINUTES )) +REORG_TIMEOUT_WINDOW_SECONDS=$( echo $MAINTENANCE_TIMEOUT_WINDOW_SECONDS | awk '{ print int(0.75*$1) }'); +RUNSTATS_TIMEOUT_WINDOW_SECONDS=$( echo $MAINTENANCE_TIMEOUT_WINDOW_SECONDS | awk '{ print int(0.25*$1) }'); + +# echo "$MAINTENANCE_TIMEOUT_WINDOW_SECONDS $REORG_TIMEOUT_WINDOW_SECONDS $RUNSTATS_TIMEOUT_WINDOW_SECONDS" + +## +## main +## +log 3 "Starting $0 at $(date) on $HOSTNAME" + +DBNAMES=$( db2 list db directory | grep -E "alias|Indirect" | grep -B 1 Indirect | grep alias | awk '{print $4}' | sort ) + +## +## loops for all dbs +## +for DBNAME in $DBNAMES +do + + ## just process the one db + if [ ! 
-z "$DB" ] && [ "$DB" != "$DBNAME" ] ; then + continue + fi + + ## can't run script on a STANDBY db + ROLE=$(db2 "get db cfg for $DBNAME" | grep 'HADR database role' | cut -d '=' -f2 | sed 's/ *//g') + if [ -z "$ROLE" ] || [ "$ROLE" == "" ]; then + log 1 " Can't determine hadr database role from 'db2 get db cfg for $DBNAME'" + continue + elif [ "$ROLE" == "STANDBY" ]; then + log 1 " Can't run script '${0}' for $DBNAME with hadr database role '$ROLE'" + continue + fi + + log 3 "DB=$DBNAME ..." + + db2 connect to $DBNAME >> /dev/null 2>&1 + rc=$? + if [ $rc -ne 0 ]; then + log 0 " can't connect to $DBNAME" + continue + fi + + if [ $TRSI -eq 1 ]; then + TRSI + continue + + elif [ $LIST_VALID_TABLE_SIZES -eq 1 ]; then + getValidTableSizes + log 3 "The following $NUM_VALID_TABLES are valid table sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$VALID_TABLE_SIZES_RAW_DATA" + continue + + elif [ $LIST_FRAGMENTED_INDEXES -eq 1 ]; then + getValidFragmentedIndexes + VALID_FRAMENTED_INDEXES_HEADER="TABSCHEMA TABNAME INDSCHEMA INDNAME INDCARD STATS_TIME LAST_USED NLEAF SEQUENTIAL_PAGES"; + log 3 "The following $NUM_VALID_FRAGMENTED_INDEXES are fragmenated indexes based on tables sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$VALID_FRAMENTED_INDEXES_HEADER\n$VALID_FRAGMENTED_INDEXES_RAW_DATA" + continue; + elif [ $LIST_REORGCHK_TB_STATS_TABLES -eq 1 ]; then + getValidTablesToReorg + REORGCHK_TB_STATS_HEADER="TABLE_SCHEMA TABLE_NAME CARD OVERFLOW NPAGES FPAGES ACTIVE_BLOCKS TSIZE F1 F2 F3 REORG"; + log 3 "The following $NUM_VALID_TABLES_TO_REORG are results from REORGCHK_TB_STATS based on tables sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$REORGCHK_TB_STATS_HEADER\n$VALID_TABLES_TO_REORG_RAW_DATA" + continue; + + elif [ $LIST_REORGCHK_IX_STATS_TABLES -eq 1 ]; then + getValidIndexesToReorg + 
REORGCHK_IX_STAT_HEADER="TABLE_SCHEMA TABLE_NAME INDEX_SCHEMA INDEX_NAME INDCARD NLEAF NUM_EMPTY_LEAFS NLEVELS NUMRIDS_DELETED FULLKEYCARD LEAF_RECSIZE NONLEAF_RECSIZE LEAF_PAGE_OVERHEAD NONLEAF_PAGE_OVERHEAD PCT_PAGES_SAVED F4 F5 F6 F7 F8 REORG"; + log 3 "The following $NUM_VALID_INDEXES_TO_REORG are results from REORGCHK_IX_STATS based on tables sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$REORGCHK_IX_STAT_HEADER\n$VALID_INDEXES_TO_REORG_RAW_DATA" + continue; + + + fi + + + INPLACE=1 + if [ $INPLACE -eq 1 ]; then + + log 3 " SCHEMA: $SCHEMANAME_IN" + log 3 " TABLES: $TABLE_IN" + log 3 "INDEXES: $INDEX_IN" + + ## + ## input table(s) verification + ## verify input table exist + ## and table is within size limits + ## create the OBJECT_ARRAY that holds the relevant table information + ## + if [ ! -z "$TABLE_IN" ]; then + + TABLE_IN=$( echo "$TABLE_IN" | tr ' ' '\n' ); + for TABNAME in $TABLE_IN + do + + ## make sure table exists + RC=$( db2 -x "select tabname from syscat.tables where tabname = '$TABNAME' and tabschema = '$SCHEMANAME_IN' and type = 'T'"); + rc=$? + if [ $rc -ne 0 ]; then + log 0 "input command line table '$TABNAME' does not exist or is invalid" + exit 1 + fi + + isTableWithinSizeLimit $SCHEMANAME_IN $TABNAME + rc=$? 
+ if [ $rc -ne 0 ]; then + log 0 "Table $TABNAME is not within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB" + exit 1 + fi + + done + + if [ $TB_STATS -eq 1 ]; then + createTableOBJECT_ARRAY "$TABLE_IN" $TB_STATS + elif [ $IF_STATS -eq 3 ]; then + createTableOBJECT_ARRAY "$TABLE_IN" $IF_STATS + fi + + elif [ ${#REORGCHK_TB_IF_STATS_OPTION} -eq 2 ]; then + for REORG_TYPE in 0 1 + do + if [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "1" ]; then + getValidTablesToReorg + TABLE_IN="$VALID_TABLES_TO_REORG"; + createTableOBJECT_ARRAY "$TABLE_IN" $TB_STATS + elif [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "3" ]; then + getValidFragmentedIndexes + TABLE_IN="$VALID_FRAGMENTED_INDEXES" + createTableOBJECT_ARRAY "$TABLE_IN" $IF_STATS + fi + done + + elif [ $TB_STATS -eq 1 ]; then + getValidTablesToReorg + TABLE_IN="$VALID_TABLES_TO_REORG"; + createTableOBJECT_ARRAY "$TABLE_IN" $TB_STATS + elif [ $IF_STATS -eq 3 ]; then + getValidFragmentedIndexes + TABLE_IN="$VALID_FRAGMENTED_INDEXES" + createTableOBJECT_ARRAY "$TABLE_IN" $IF_STATS + fi + + ## + ## input indexes verification + ## verify input indexes exist + ## + if [ ! -z "$INDEX_IN" ]; then + INDEX_IN=$( echo "$INDEX_IN" | tr ' ' '\n' ); + for INDEX in $INDEX_IN + do + ## make sure index exists, especially those input on command line + TABSCHEMA=$( echo $INDEX | cut -d. -f1); + TABNAME=$( echo $INDEX | cut -d. -f2); + INDSCHEMA=$( echo $INDEX | cut -d. -f3); + INDNAME=$( echo $INDEX | cut -d. -f4); + RC=$( db2 -x "select indname from syscat.indexes where tabschema = '$TABSCHEMA' and tabname = '$TABNAME' and indschema = '$INDSCHEMA' and indname = '$INDNAME'"); + rc=$? + if [ $rc -ne 0 ]; then + log 0 " input command line index '$INDEX' does not exist" + exit 1 + fi + + isTableWithinSizeLimit $TABSCHEMA $TABNAME + rc=$? 
+ if [ $rc -ne 0 ]; then + log 0 "Table $TABNAME is not within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB" + exit 1 + fi + + done + + createIndexOBJECT_ARRAY "$INDEX_IN" $IX_STATS; + ## + ## + elif [ $IX_STATS -eq 2 ]; then + getValidIndexesToReorg + INDEX_IN="$VALID_INDEXES_TO_REORG" + createIndexOBJECT_ARRAY "$INDEX_IN" $IX_STATS; + fi + + ## get the NUMBER of tables per reorg table type + getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY $TB_STATS; OBJECT_NUM_TB_STATS=$?; + getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY $IX_STATS; OBJECT_NUM_IX_STATS=$?; + getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY $IF_STATS; OBJECT_NUM_IF_STATS=$?; + + ## just list out the OBJECT_ARRAY and exit + if [ $LIST_ONLY -eq 1 ]; then + + listOBJECT_ARRAY; + exit 1 + fi + + + if [ $EXECUTE_TABLE_REORG -eq 1 ]; then + + + ## + ## this is the main list of what we are going to reorg + ## + echo "" + + listOBJECT_ARRAY; + +# exit 1 + + ## + ## setup some control variables for the main loop + ## + ## REORG_STATUS COMPLETED PAUSED STARTED STOPPED TRUNCATE + ## + MAINTENANCE_TIMEOUT_WINDOW_START_TIME_SECONDS=$( date '+%s' ); + REORG_TIMEOUT_WINDOW_START_TIME_SECONDS=$( date '+%s' ); + WINDOW_START_TIME_DB2=$( date '+%Y-%m-%d-%H.%M.%S' ); + IF_STATS_WINDOW_START_TIME_DB2=$( date '+%s' ); + IF_STATS_BYPASS_SLEEP_INTERVAL_TIME=0; + NUM_REORG_OBJECTS="${#OBJECT_ARRAY[@]}"; +# NUM_REORGS_IN_PROGRESS=0; +# NUM_REORGS_KICKED_OFF=0; +# NUM_REORGS_COMPLETED=0; +# NUM_REORGS_STOPPED=0; +# NUM_REORGS_ABORTED=0; + REORG_TIMEOUT_OVERFLOW_VALVE=300; + REORG_TIMEOUT_WINDOW_COMPLETED=0; + REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER=0; + initTABLE_IN_USE_ARRAY $MAX_ASYNC_REORGS_ALLOWED + + ## multi table reorg option + ## online table reorg and offline index reorgs have different options + MAX_ASYNC_REORGS_ALLOWED_ORG=$MAX_ASYNC_REORGS_ALLOWED; + REORG_TIMEOUT_WINDOW_ACTION_ORG=$REORG_TIMEOUT_WINDOW_ACTION; + SLEEP_INTERVAL_TIME_ORG=$SLEEP_INTERVAL_TIME; + + ## override some 
defaults for offline reorgs + #if [ $IF_STATS -eq 3 ]; then + # MAX_ASYNC_REORGS_ALLOWED=1; + # REORG_TIMEOUT_WINDOW_ACTION=1; + # ## make SLEEP_INTERVAL_TIME 1/3 for IF_STATS + # SLEEP_INTERVAL_TIME=$( echo $SLEEP_INTERVAL_TIME | awk '{ print $1/3 }'); + #fi + + echo "" + log 3 "Starting reorg of ..." + + if [ ${#REORGCHK_TB_IF_STATS_OPTION} -eq 2 ]; then + for REORG_TYPE in 0 1 + do + + if [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "1" ]; then + TB_STATS=1; + IF_STATS=0; + NUM_REORG_OBJECTS=$OBJECT_NUM_TB_STATS; + REORG_TABLE_TYPE=$TB_STATS; + MAX_ASYNC_REORGS_ALLOWED=$MAX_ASYNC_REORGS_ALLOWED_ORG; + REORG_TIMEOUT_WINDOW_ACTION=$REORG_TIMEOUT_WINDOW_ACTION_ORG; + SLEEP_INTERVAL_TIME=$SLEEP_INTERVAL_TIME_ORG; + + elif [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "3" ]; then + TB_STATS=0; + IF_STATS=3; + NUM_REORG_OBJECTS=$OBJECT_NUM_IF_STATS; + REORG_TABLE_TYPE=$IF_STATS; + MAX_ASYNC_REORGS_ALLOWED=1; + REORG_TIMEOUT_WINDOW_ACTION=1; + SLEEP_INTERVAL_TIME=$( echo $SLEEP_INTERVAL_TIME_ORG | awk '{ print $1/3 }'); + fi + + reorgTables + done + else + if [ $TB_STATS -eq 1 ]; then + REORG_TABLE_TYPE=$TB_STATS; + elif [ $IX_STATS -eq 2 ]; then + REORG_TABLE_TYPE=$IX_STATS; + elif [ $IF_STATS -eq 3 ]; then + REORG_TABLE_TYPE=$IF_STATS; + fi + reorgTables + fi + + ## list current state of OBJECT_ARRAY + listOBJECT_ARRAY; + + ## + ## now do runstats + ## + log 3 "Starting runstats of ..." 
+ RUNSTATS_TIMEOUT_WINDOW_START_TIME_SECONDS=$( date '+%s' ); + + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + + ## check runstats window maintenance time + MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS=$( date '+%s' ); + DIFF=$(( MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS - REORG_TIMEOUT_WINDOW_START_TIME_SECONDS )); + if [ $DIFF -ge $(( REORG_TIMEOUT_WINDOW_SECONDS + RUNSTATS_TIMEOUT_WINDOW_SECONDS)) ]; then + log 3 "$REORG_TIMEOUT_WINDOW_START_TIME_SECONDS +$MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS $REORG_TIMEOUT_WINDOW_SECONDS $RUNSTATS_TIMEOUT_WINDOW_SECONDS $DIFF" + log 3 "Runstats window ending, runstats window time exceeded" + break + fi + + ## get table info + TABSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f1 ); + TABNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f2 ); + INDSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f3 ); + INDNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f4 ); + IID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f5 ); + OBJECT_REORG_STATUS=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f6 ); + + ## only do a runstats if table/index has completed + ## verify stats time so we dont kick off another runstats on the same table + if [ "$OBJECT_REORG_STATUS" == "COMPLETED" ]; then + STATS_TIME=$( db2 -x "select stats_time from syscat.tables where tabschema='$TABSCHEMA' and tabname='$TABNAME' and stats_time < TIMESTAMP('$WINDOW_START_TIME_DB2') " ); + rc=$? 
+ if [ $rc -eq 0 ]; then
+ log 3 "Starting runstats on $TABSCHEMA.$TABNAME"
+ # db2 -v "runstats on table $TABSCHEMA.$TABNAME WITH DISTRIBUTION ON ALL COLUMNS AND SAMPLED DETAILED INDEXES ALL ALLOW WRITE ACCESS";
+ db2 -v "runstats on table $TABSCHEMA.$TABNAME WITH DISTRIBUTION ON KEY COLUMNS AND SAMPLED DETAILED INDEXES ALL ALLOW WRITE ACCESS UTIL_IMPACT_PRIORITY 50";
+ log 3 "Finished runstats on $TABSCHEMA.$TABNAME"
+ fi
+ fi
+
+ done
+
+ fi ## EXECUTE_TABLE_REORG
+
+
+ fi ## INPLACE
+
+
+done ## DBNAMES
+
+##
+## cleanup
+##
+
+log 3 "Completed $0 at $(date)"
+
+exit 0
\ No newline at end of file
diff --git a/instance-applications/120-ibm-db2u-database/files/reorgTablesIndexesInplace2_maintenance.sh b/instance-applications/120-ibm-db2u-database/files/reorgTablesIndexesInplace2_maintenance.sh
new file mode 100755
index 000000000..94e16231e
--- /dev/null
+++ b/instance-applications/120-ibm-db2u-database/files/reorgTablesIndexesInplace2_maintenance.sh
@@ -0,0 +1,1336 @@
+#!/bin/bash
+##
+## HPS created Jan 2019
+##
+## http://www.ibm.com/developerworks/data/library/techarticle/dm-1307optimizerunstats/
+## see Identifying fragmented indexes from statistics
+## http://www.ibm.com/developerworks/data/library/techarticle/dm-1307optimizerunstats/#Listing%207
+##
+## script to reorg tables online and indexes offline based on different criteria
+## we need to be able to perform an online table reorg and an offline indexes all reorg in the same run of the script
+##
+
+UsageHelp()
+{
+
+ echo "Script to perform reorg tables, indexes online (inplace) "
+ echo "          also to REORG INDEXES ALL FOR TABLE offline"
+ echo "          db2 performs the online reorgs asynchronously"
+ echo ""
+ echo "Usage: ${0} [options]"
+ echo "  where [options] is one of the following:"
+ echo "  -h: displays this usage screen"
+ echo "  -db: dbname, default is all cataloged databases"
+ echo ""
+ echo "  -s: table schemaname"
+ echo "  -t: table(s) to reorg"
+ echo "-tb_stats: reorg tables reported by 
REORGCHK_TB_STATS" + echo " -ti: reorg table index(s), format must be TABSCHEMA.TABNAME.INDSCHEMA.INDNAME" + echo "-ix_stats: reorg table index(s) reported by REORGCHK_IX_STATS" + echo "-if_stats: reorg indexes all for table(s) offline as reported by index fragmentation NLEAF/SEQUENTIAL_PAGES columns" + echo "" + echo " -ls: list valid table sizes for a particular schema" + echo " -lf: list all fragmented index details for a particular schema, based on valid table sizes" + echo " -lt: list all tables to reorg based on REORGCHK_TB_STATS reorg column, based on valid table sizes" + echo " -li: list all indexes to reorg based on REORGCHK_IX_STATS reorg column, based on valid table sizes" + echo " -l: list tables/indexes that would be reorged" + echo "" + echo " -ittx: ignore tables over a specific threshold size in MBs, default is 20000 MB ie 20 GB" + echo " -ittn: ignore tables under a specific threshold size in MBs, default is 10 MB" + echo " -mar: maximum asynchronous reorgs allowed, default is 3" + echo " -log: don't kick off a reorg if transaction log usage is over a certain percentage, default is 90%" + echo " -window: stop reorg tables/indexes/runstats after a set maintenance timeout window, default is 240 minutes" + echo " -twa: timeout window action: default=2 for online, 1 for offline" + echo " 1=allow current reorg(s) to continue" + echo " 2=stop current reorg(s)" +# echo " 3=stop current reorg(s) if < 80% complete and continue script" + echo " -ignore: ignore specific tables from SYSIBMADM.ADMINTABINFO t0, SYSCAT.TABLES t1 " + echo " eg \"$IGNORE_TABLES_EX\"" + echo " -reorg: table F1 F2 F3 filter reorg, default is *" + echo " -sleep: SLEEP_INTERVAL_TIME, default is 60 seconds" + echo "" + echo " -tr: execute inplace table/index reorg" + echo "" + echo " -trsi: Retrieve table reorganization snapshot information from snap_get_tab_reorg and db2pd -reorgs index" + echo "" + echo "Examples:" + echo " 1. ${0} -h" + echo " 2. 
${0} -db dbname -s BLUDB -ls" + echo " 3. ${0} -s MAXIMO -t \"YFS_ITEM YFS_TASK_Q YFS_SHIPMENT\" -tb_stats -tr " + echo " 4. ${0} -s MAXIMO -ti \"MAXIMO.YFS_SNAPSHOT.MAXIMO.YFS_SNAPSHOT_I1 MAXIMO.YFS_ITEM.MAXIMO.YFS_ITEM_PK\" -ix_stats -tr" + echo " 5. ${0} -s MAXIMO -t \"YFS_ITEM YFS_SNAPSHOT YFS_IMPORT YFS_EXPORT\" -if_stats -tr" + echo " 6. ${0} -s MAXIMO -tb_stats -mar 5 -window 10 -log 95 -ittx 30000 -tr" + echo " 7. ${0} -s MAXIMO -tb_stats -mar 5 -window 10 -log 95 -ignore \"$IGNORE_TABLES_EX\" -reorg \"***\" -tr" + echo " 8. ${0} -s MAXIMO -tb_stats -if_stats -ittx 100 -ittn 20 -tr" + echo " 9. ${0} -trsi" + + echo "" + +} + +## +## function to check if a string is numeric +## +isNumeric() +{ + echo $1 | grep -E '^[0-9]+$' > /dev/null + + return $? + +} + + +TRSI() +{ + db2 -v "select varchar(tabschema,9) as tabschema, varchar(tabname,32) as tabname, + REORG_STATUS, REORG_COMPLETION, REORG_PHASE, REORG_CURRENT_COUNTER, REORG_MAX_COUNTER, +-- varchar( varchar_format(REORG_START, 'YYYY-MM-DD HH24:MI:SS'),19) as REORG_START, +-- varchar( varchar_format(REORG_END, 'YYYY-MM-DD HH24:MI:SS'),19) as REORG_END, + REORG_START, REORG_END, + REORG_INDEX_ID, REORG_TBSPC_ID + from table(snap_get_tab_reorg('')) + order by REORG_START asc + with ur" + + db2pd -db $DBNAME -reorgs index | sed -n "/Index Reorg Stats:/,//p" + + +} + +log() +{ + TYPE=$1 + MSG="$2" + + DATE=$( date '+%d-%m-%Y %H:%M:%S' ); + + # TYPE: + # 0 = Critical + # 1 = Warn + # 3 = Info + # 5 = Debug' + if [ ${TYPE} -eq 0 ]; then + TYPEMSG="Error" + elif [ ${TYPE} -eq 1 ]; then + TYPEMSG="Warning" + elif [ ${TYPE} -eq 3 ]; then + TYPEMSG="Info" + elif [ ${TYPE} -eq 5 ]; then + TYPEMSG="Debug" + else + TYPEMSG="Other" + fi + + echo -e "${DATE} ${TYPEMSG}: ${MSG}" | tee -a $REORG_TABLE_INDEX_LOG + + return 0 +} + +initTABLE_IN_USE_ARRAY() +{ + + local NUM_ITEMS=$1 + local jj; + + ## + ## initialise the db2 TABLE_IN_USE_ARRAY + ## + for((jj=0; jj<$NUM_ITEMS; jj++)) + do + TABLE_IN_USE_ARRAY[$jj]="" + done + + 
return 0 + +} + +existTABLE_TABLE_IN_USE_ARRAY() +{ + + local TABLE=$1 + local jj; + ## + ## check if table is in use + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + if [ "${TABLE_IN_USE_ARRAY[$jj]}" == "$TABLE" ]; then + return 0; + fi + done + + return 1; + +} + +addTABLE_TABLE_IN_USE_ARRAY() +{ + + local TABLE=$1 + local jj; + + ## + ## add table in empty slot + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + if [ "${TABLE_IN_USE_ARRAY[$jj]}" == "" ]; then + TABLE_IN_USE_ARRAY[$jj]=$TABLE; + return 0; + fi + done + + return 1; +} + +removeTABLE_TABLE_IN_USE_ARRAY() +{ + + local TABLE=$1 + local jj; + ## + ## remove entry + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + if [ "${TABLE_IN_USE_ARRAY[$jj]}" == "$TABLE" ]; then + TABLE_IN_USE_ARRAY[$jj]=""; + return 0; + fi + done + + return 1; + +} + + +listTABLE_IN_USE_ARRAY() +{ + + local jj; + ## + ## list table entries + ## + for((jj=0; jj<${#TABLE_IN_USE_ARRAY[@]}; jj++)) + do + log 5 "TABLE_IN_USE_ARRAY $jj ${TABLE_IN_USE_ARRAY[$jj]}"; + done + + return 0; + +} + +getValidTablesToReorg() +{ + + getValidTableSizes + + VALID_TABLES_TO_REORG="" + VALID_TABLES_TO_REORG_RAW="" + NUM_VALID_TABLES_TO_REORG=0 + for TABNAME in $VALID_TABLES + do + + RAW=$( db2 -x "call REORGCHK_TB_STATS('T','$SCHEMANAME_IN.$TABNAME')" ); + RAW=$( echo "$RAW" | grep $SCHEMANAME_IN | grep $TABNAME | awk '{ if (NF == 12) print $0 }' | sed 's/ \+/ /g' | grep $REORG ) + rc=$? 
+ if [ $rc -eq 0 ]; then + TABNAME=$( echo "$RAW" | awk '{print $2}' ); + [ "$VALID_TABLES_TO_REORG_RAW_DATA" == "" ] && VALID_TABLES_TO_REORG_RAW_DATA="$RAW" || VALID_TABLES_TO_REORG_RAW_DATA="$VALID_TABLES_TO_REORG_RAW_DATA\n$RAW" + fi + + done + + ## sort the tables based on REORG column + VALID_TABLES_TO_REORG_RAW_DATA=$( echo -e "$VALID_TABLES_TO_REORG_RAW_DATA" | sort -k12 -r); + VALID_TABLES_TO_REORG=$( echo -e "$VALID_TABLES_TO_REORG_RAW_DATA" | awk '{print $2}' ); + NUM_VALID_TABLES_TO_REORG=$( echo -e "$VALID_TABLES_TO_REORG" | wc -l ); + + return 0 + +} + +getValidIndexesToReorg() +{ + + getValidTableSizes + + VALID_INDEXES_TO_REORG="" + VALID_INDEXES_TO_REORG_RAW="" + NUM_VALID_INDEXES_TO_REORG=0 + for TABNAME in $VALID_TABLES + do + + RAW=$( db2 -x "call REORGCHK_IX_STATS('T','$SCHEMANAME_IN.$TABNAME')" ); + ## this can return multiple indexes for same TABNAME + RAW=$( echo "$RAW" | grep $SCHEMANAME_IN | grep $TABNAME | awk '{ if (NF == 21) print $0 }' | sed 's/ \+/ /g' | grep $REORG ); + rc=$? 
+ if [ $rc -eq 0 ]; then + INDNAME=$( echo "$RAW" | awk '{print $1"."$2"."$3"."$4}' ); + [ "$VALID_INDEXES_TO_REORG_RAW_DATA" == "" ] && VALID_INDEXES_TO_REORG_RAW_DATA=$RAW || VALID_INDEXES_TO_REORG_RAW_DATA="$VALID_INDEXES_TO_REORG_RAW_DATA\n$RAW" + fi + + done + + ## sort the indexes based on REORG column + VALID_INDEXES_TO_REORG_RAW_DATA=$( echo -e "$VALID_INDEXES_TO_REORG_RAW_DATA" | sort -k21 -r); + VALID_INDEXES_TO_REORG=$( echo -e "$VALID_INDEXES_TO_REORG_RAW_DATA" | awk '{print $1"."$2"."$3"."$4}' ); + NUM_VALID_INDEXES_TO_REORG=$( echo -e "$VALID_INDEXES_TO_REORG" | wc -l ); + + return 0 + +} + +getValidFragmentedIndexes() +{ + + ## + ## http://www.ibm.com/developerworks/data/library/techarticle/dm-1307optimizerunstats/#Listing%207 + ## + + getValidTableSizes + + VALID_FRAGMENTED_INDEXES_RAW_DATA=$( db2 -x "select rtrim(tabschema)||' '||rtrim(tabname)||' '||rtrim(indschema)||' '||rtrim(indname) + ||' '||indcard||' '||stats_time||' '||lastused||' '||nleaf||' '||sequential_pages + from syscat.indexes where tabschema='$SCHEMANAME_IN' + and not (nleaf = 1 and sequential_pages = 0) + and not (nleaf = 0 and sequential_pages = 1) + and (nleaf - sequential_pages > 10) + and tabname in ( $VALID_TABLES_FORMATTED ) + order by tabname + with ur"; ); + + VALID_FRAGMENTED_INDEXES_RAW_DATA=$(echo "${VALID_FRAGMENTED_INDEXES_RAW_DATA}" | sed 's/ *$//g' ); + VALID_FRAGMENTED_INDEXES=$( echo "${VALID_FRAGMENTED_INDEXES_RAW_DATA}" | sed 's/ *$//g' | cut -d' ' -f2 | uniq ) + NUM_VALID_FRAGMENTED_INDEXES=$( echo "${VALID_FRAGMENTED_INDEXES}" | wc -l ) + + +} + +getValidTableSizes() +{ + + VALID_TABLE_SIZES_RAW_DATA=$( db2 "select t0.tabname, + ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) as TOTAL_TABLE_MB, + cast ((INDEX_OBJECT_P_SIZE / 1024) as integer) as INDEX_SIZE_MB + from SYSIBMADM.ADMINTABINFO t0, SYSCAT.TABLES t1 + where t0.tabschema='$SCHEMANAME_IN' + and t0.tabschema=t1.tabschema + and 
t0.tabname=t1.tabname + $IGNORE_TABLES + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) < $IGNORE_TABLE_SIZE_THRESHOLD_MAX + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) > $IGNORE_TABLE_SIZE_THRESHOLD_MIN + order by 2 desc + with ur"; ); + rc=$? + if [ $rc -eq 0 ]; then + VALID_TABLE_SIZES=$( echo "$VALID_TABLE_SIZES_RAW_DATA" | sed '1,3d' | sed '$d' | sed '$d' ); + VALID_TABLE_SIZES=$( echo "$VALID_TABLE_SIZES" | awk 'BEGIN {ORS="\t"} { for(ii=1 ; ii<=NF ; ii++) print $ii; printf "\n"; }'); + VALID_TABLES=$( echo "$VALID_TABLE_SIZES" | awk '{print $1}' ); + NUM_VALID_TABLES=$( echo "$VALID_TABLE_SIZES" | wc -l ); + # log 3 "NUM_VALID_TABLES=$NUM_VALID_TABLES" + + VALID_TABLES_FORMATTED="" + for TABLE in $VALID_TABLES + do + VALID_TABLES_FORMATTED="$VALID_TABLES_FORMATTED'$TABLE'," + done + VALID_TABLES_FORMATTED=$( echo "$VALID_TABLES_FORMATTED" | sed 's/,$//g' ) + else + VALID_TABLES_FORMATTED="'UNKNOWN_TABNAME'" + fi + +} + +## is TABLE within size limits < IGNORE_TABLE_SIZE_THRESHOLD_MAX and > IGNORE_TABLE_SIZE_THRESHOLD_MIN +isTableWithinSizeLimit() +{ + local SCHEMANAME=$1 + local TABNAME=$2 + local RC="" + local rc=0 + + RC=$( db2 -x "select tabname, + ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) as TOTAL_TABLE_MB + from SYSIBMADM.ADMINTABINFO + where tabschema='$SCHEMANAME' + and tabname = '$TABNAME' + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) < $IGNORE_TABLE_SIZE_THRESHOLD_MAX + and ( ( DATA_OBJECT_P_SIZE + INDEX_OBJECT_P_SIZE + LONG_OBJECT_P_SIZE + LOB_OBJECT_P_SIZE + XML_OBJECT_P_SIZE ) / 1024 ) > $IGNORE_TABLE_SIZE_THRESHOLD_MIN + order by 2 desc + with ur" ); + + rc=$? 
+ return $rc
+
+}
+
+## create a list/array of table objects to reorg based on tabnames
+createTableOBJECT_ARRAY()
+{
+ local TABNAMES="$1"
+ local OBJECT_REORG_TABLE_TYPE=$2
+
+ ##
+ ## make the OBJECT_ARRAY for tables and indexes
+ ##
+ if [ -z "$OBJECT_ARRAY" ]; then
+ local let index=0;
+ else
+ local let index=${#OBJECT_ARRAY[@]};
+ fi
+ local TID=0 ## Table ID - always 0 for online table reorg
+ local INDSCHEMA=NULL;
+ local INDNAME=NULL;
+ local LOCK_COUNT=0
+
+ for TABNAME in $TABNAMES
+ do
+ ## we need TABLEID, TBSPACEID for IF_STATS as the full TableName: may not be displayed in the db2pd output
+ local RC=$( db2 -x "select TABLEID, TBSPACEID from syscat.tables where tabname='$TABNAME' and tabschema='$SCHEMANAME_IN'" )
+ local rc=$?
+ if [ $rc -eq 0 ]; then
+ local TABLEID=$( echo $RC | awk '{print $1}' );
+ local TBSPACEID=$( echo $RC | awk '{print $2}' );
+ OBJECT_ARRAY[$index]="$SCHEMANAME_IN#$TABNAME#$INDSCHEMA#$INDNAME#$TID#NOTSTARTED#2019-01-01-00.00.00#2019-01-01-00.00.00#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"
+ let index+=1
+ fi
+ done
+
+}
+
+## create a list/array of table objects to reorg based on indnames
+createIndexOBJECT_ARRAY()
+{
+
+ local INDNAMES="$1"
+ local OBJECT_REORG_TABLE_TYPE=$2
+ local let index=0
+ local TABLEID=9999;
+ local TBSPACEID=9999;
+ local LOCK_COUNT=0;
+
+ for INDEX in $INDNAMES
+ do
+ local TABSCHEMA=$( echo $INDEX | cut -d. -f1);
+ local TABNAME=$( echo $INDEX | cut -d. -f2);
+ local INDSCHEMA=$( echo $INDEX | cut -d. -f3);
+ local INDNAME=$( echo $INDEX | cut -d. -f4);
+ local RC=$( db2 -x "select IID from syscat.indexes where tabschema = '$TABSCHEMA' and tabname = '$TABNAME' and indschema = '$INDSCHEMA' and indname = '$INDNAME'");
+ local rc=$?
+ if [ $rc -eq 0 ]; then + local IID=$( echo $RC | cut -d' ' -f1); + OBJECT_ARRAY[$index]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#NOTSTARTED#2019-01-01-00.00.00#2019-01-01-00.00.00#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE" + let index+=1 + fi + done + +} + +## +## list out the objects and state +## this can be used for debugging +## +listOBJECT_ARRAY() +{ + + local ii; + log 3 "The following is for debug purposes, Num objects=${#OBJECT_ARRAY[@]}, $OBJECT_NUM_TB_STATS:$OBJECT_NUM_IX_STATS:$OBJECT_NUM_IF_STATS" + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + echo "${OBJECT_ARRAY[$ii]}" | tee -a $REORG_TABLE_INDEX_DEBUG + done + +} + +## get the number tb_stats, ix_stats and if_stats objects +getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY() +{ + + local rc=0; + local ii; + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + if [ $( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f12 ) -eq $1 ]; then + let rc+=1; + fi + done + + return $rc; + +} + +## +## the main event +## +reorgTables() +{ + + ## variables that need to be reset on each run of the function + NUM_REORGS_IN_PROGRESS=0; + NUM_REORGS_KICKED_OFF=0; + NUM_REORGS_COMPLETED=0; + NUM_REORGS_STOPPED=0; + NUM_REORGS_ABORTED=0; + + while true + do + + ## check reorg window maintenance time + MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS=$( date '+%s' ); + DIFF=$(( MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS - REORG_TIMEOUT_WINDOW_START_TIME_SECONDS )); + + ## safety valve - if for some reason the logic can't stop the reorgs + if [ $DIFF -ge $(( REORG_TIMEOUT_WINDOW_SECONDS + REORG_TIMEOUT_OVERFLOW_VALVE )) ]; then + log 1 "REORG_TIMEOUT_OVERFLOW_VALVE detected"; + log 1 "Aborting reorgs" + break; + fi + + if [ $DIFF -ge $REORG_TIMEOUT_WINDOW_SECONDS ]; then + + REORG_TIMEOUT_WINDOW_COMPLETED=1; + + ## -twa: timeout window action: default=3 + ## 1=allow current reorg(s) to continue + ## 2=stop current reorg(s) + ## 3=stop current reorg(s) if < 80% complete + if [ $REORG_TIMEOUT_WINDOW_ACTION -eq 1 ]; then 
+ + log 3 "Reorg window ending, reorg window time exceeded, twa=$REORG_TIMEOUT_WINDOW_ACTION" + break + + elif [ $REORG_TIMEOUT_WINDOW_ACTION -eq 2 ]; then + + ## use of the REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER + ## 0 = ABORT those not started and issue a STOP to those STARTED + ## 1 = loop again and see if script exits as all reorgs are COMPLETED and STOPPED and ABORTED + ## 2 = break out + if [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 0 ]; then + let REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER+=1 + log 3 "Reorg window ending, reorg window time exceeded, twa=$REORG_TIMEOUT_WINDOW_ACTION" + log 3 "Aborting reorgs NOTSTARTED and issuing a STOP to those that are STARTED" + elif [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 1 ]; then + let REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER+=1 + log 3 "REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER=$REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER" + elif [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 2 ]; then + log 3 "REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER=$REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER" + log 3 "breaking out of reorg loop" + break; + fi + + fi + + fi + + ## loop for all OBJECTS - extract relevant data from OBJECT array + ## if we have NOTSTARTED in the OBJECT_ARRAY[N] - then kick off a reorg + ## then check tables reorg status + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + ## get table related info + TABSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f1 ); + TABNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f2 ); + INDSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f3 ); + INDNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f4 ); + IID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f5 ); + OBJECT_REORG_STATUS=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f6 ); + OBJECT_REORG_START=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f7 ); + OBJECT_REORG_END=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f8 ); + TABLEID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f9 ); + TBSPACEID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f10 ); + LOCK_COUNT=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# 
-f11 ); + OBJECT_REORG_TABLE_TYPE=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f12 ); + isTable=0; + isIndex=0; + if [ "$INDSCHEMA" == "NULL" -a "$INDNAME" == "NULL" ]; then + isTable=1; + else + isIndex=1; + fi + + if [ $OBJECT_REORG_TABLE_TYPE -ne $REORG_TABLE_TYPE ]; then + continue; + fi + + # log 5 "${OBJECT_ARRAY[$ii]}" + + ## + ## has OBJECT COMPLETED - no need to continue here + ## + if [ "$OBJECT_REORG_STATUS" == "COMPLETED" -o "$OBJECT_REORG_STATUS" == "STOPPED" -o "$OBJECT_REORG_STATUS" == "ABORTED" ]; then + continue; + fi + + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + + ## + ## query db2 for REORG_STATUS etc - there may not be an entry so carry on + ## + RC_SNAP=$( db2 -x "select REORG_STATUS, REORG_COMPLETION, REORG_PHASE, REORG_CURRENT_COUNTER, REORG_MAX_COUNTER, REORG_START, REORG_END, REORG_INDEX_ID, REORG_TBSPC_ID from table(snap_get_tab_reorg('')) where tabschema='$TABSCHEMA' and tabname='$TABNAME' and REORG_START > TIMESTAMP('$WINDOW_START_TIME_DB2') and REORG_INDEX_ID=$IID"); + rc=$? + if [ $rc -ge 2 ]; then + log 0 "Possible error running select query against db2\nrc=$rc\nRC=$RC" + continue; + fi + + if [ $rc -eq 0 ]; then + RC=$( echo "$RC_SNAP" | awk 'BEGIN {ORS="\t"} { for(ii=1 ; ii<=NF ; ii++) print $ii; }') + REORG_STATUS=$( echo "$RC_SNAP" | awk '{print $1}' ); + REORG_COMPLETION=$( echo "$RC_SNAP" | awk '{print $2}' ); + REORG_CURRENT_COUNTER=$( echo "$RC_SNAP" | awk '{print $4}' ); + REORG_MAX_COUNTER=$( echo "$RC_SNAP" | awk '{print $5}' ); + REORG_START=$( echo "$RC_SNAP" | awk '{print $6}' ); + REORG_END=$( echo "$RC_SNAP" | awk '{print $7}' ); + REORG_INDEX_ID=$( echo "$RC_SNAP" | awk '{print $8}' ); + + REORG_PERCENT_COMPLETE=0 + if [ ! -z "$REORG_CURRENT_COUNTER" -a $REORG_CURRENT_COUNTER -gt 0 ]; then + if [ ! 
-z "$REORG_MAX_COUNTER" -a $REORG_MAX_COUNTER -gt 0 ]; then + if [ $REORG_MAX_COUNTER -ge $REORG_CURRENT_COUNTER ]; then + REORG_PERCENT_COMPLETE=$( echo $REORG_CURRENT_COUNTER $REORG_MAX_COUNTER | awk '{ print int (($1/$2)*100) }' ); + fi + fi + fi + fi + + # log 5 "RC_SNAP=$RC_SNAP" + + elif [ $IF_STATS -eq 3 ]; then + + DB2PD_REORG_INDEX_RECORD=$( db2pd -db $DBNAME -reorgs index | grep -B1 -A11 -w "^TbspaceID: $TBSPACEID" | grep -B1 -A11 -w "TableID: $TABLEID" ); + rc=$? + if [ $rc -eq 0 ]; then + REORG_START=$(echo "$DB2PD_REORG_INDEX_RECORD" | grep '^Start Time:' | awk '{ print $3,$4}' ); + REORG_START_SECONDS=$(date --d="$REORG_START" '+%s'); + + if [ $REORG_START_SECONDS -ge $IF_STATS_WINDOW_START_TIME_DB2 ]; then + REORG_STATUS=$(echo "$DB2PD_REORG_INDEX_RECORD" | grep '^Status:' | awk '{ $1=""; print $0}' | sed 's/^[ \t]*//;s/[ \t]*$//' ); + ## some differences between snap_get_tab_reorg and db2pd -reogrs index output + if [ "$REORG_STATUS" == "In Progress" ]; then + REORG_STATUS="STARTED"; + elif [ "$REORG_STATUS" == "Completed" ]; then + REORG_STATUS="COMPLETED"; + elif [ "$REORG_STATUS" == "Stopped" ]; then + REORG_STATUS="STOPPED"; + fi + + REORG_END=$(echo "$DB2PD_REORG_INDEX_RECORD" | grep '^Start Time:' | grep 'End Time:' | awk '{ print $7,$8}' ); + fi + + fi + + + fi + + + ## + ## has OBJECT been KICKED_OFF or STARTED + ## if it has check to see if is STARTED or COMPLETED + ## update OBJECT_ARRAY + ## update COMPLETION/STOPPED stats + ## + if [ "$OBJECT_REORG_STATUS" == "KICKED_OFF" ] || [ "$OBJECT_REORG_STATUS" == "STARTED" ]; then + if [ ! 
-z "$REORG_STATUS" ]; then + if [ "$REORG_STATUS" == "STARTED" -o "$REORG_STATUS" == "COMPLETED" -o "$REORG_STATUS" == "STOPPED" ]; then + OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$REORG_STATUS#$REORG_START#$REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE" + fi + + if [ "$REORG_STATUS" == "COMPLETED" -o "$REORG_STATUS" == "STOPPED" ]; then + + removeTABLE_TABLE_IN_USE_ARRAY "$TABSCHEMA.$TABNAME" + rc=$? + if [ $rc -eq 1 ]; then + log 1 "Failed to remove table $TABSCHEMA.$TABNAME from TABLE_IN_USE_ARRAY"; + fi + + if [ "$REORG_STATUS" == "COMPLETED" ]; then + let NUM_REORGS_COMPLETED+=1 + elif [ "$REORG_STATUS" == "STOPPED" ]; then + let NUM_REORGS_STOPPED+=1 + fi + + let NUM_REORGS_IN_PROGRESS-=1 + fi + + fi + + ## + ## OBJECT is NOSTARTED so KICK_OFF a reorg + ## + elif [ "$OBJECT_REORG_STATUS" == "NOTSTARTED" ]; then + + ## dont kick off any reorgs if window timeout passed and TWA=2 + ## OBJECTS become ABORTED - UPDATE OBJECT_ARRAY + if [ $REORG_TIMEOUT_WINDOW_COMPLETED -eq 1 ] && [ $REORG_TIMEOUT_WINDOW_ACTION -eq 2 ] && [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 1 ]; then + REORG_STATUS=ABORTED; + OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$REORG_STATUS#$OBJECT_REORG_START#$OBJECT_REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"; + let NUM_REORGS_ABORTED+=1; + continue; + + fi + + ## is the TABLE already being used -if it is goto next OBJECT + existTABLE_TABLE_IN_USE_ARRAY "$TABSCHEMA.$TABNAME" + rc=$? 
+ [ $rc -eq 0 ] && continue; + + ## we only want to kick off so many reorgs at any one time + if [ $NUM_REORGS_IN_PROGRESS -eq $MAX_ASYNC_REORGS_ALLOWED ]; then + continue; + fi + + ## + ## The table could already be locked - if it is then by-pass it + ## and ABORT if locked more than 10 times + ## + TABLE_LOCKED=$( db2 "select APPLICATION_HANDLE, LOCK_OBJECT_TYPE, LOCK_MODE, LOCK_CURRENT_MODE, LOCK_STATUS, LOCK_COUNT, LOCK_HOLD_COUNT, TBSP_ID, TAB_FILE_ID from TABLE (MON_GET_LOCKS(NULL, -2)) where TBSP_ID=$TBSPACEID and TAB_FILE_ID=$TABLEID and LOCK_OBJECT_TYPE='TABLE' and LOCK_MODE='IX' with ur"; ); + rc=$? + if [ $rc -eq 0 ]; then + let LOCK_COUNT+=1; + log 1 "Appears table $TABSCHEMA.$TABNAME is already locked by another application(s), LOCK_COUNT=$LOCK_COUNT"; + log 1 "$TABLE_LOCKED"; + if [ $LOCK_COUNT -gt 10 ]; then + OBJECT_REORG_STATUS=ABORTED; + let NUM_REORGS_ABORTED+=1; + log 1 "Aborting table $TABSCHEMA.$TABNAME , LOCK_COUNT=$LOCK_COUNT"; + fi +OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$OBJECT_REORG_STATUS#$OBJECT_REORG_START#$OBJECT_REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE"; + continue; + fi + + ## + ## do a check to see where we are on transaction log space - this could be improved!!!! + ## + LOG_USED=$( db2 "select cast(LOG_UTILIZATION_PERCENT as decimal(5,2)) as PCTUSED, cast((TOTAL_LOG_USED_KB/1024) as Integer) as TOTUSEDMB, cast((TOTAL_LOG_AVAILABLE_KB/1024) as Integer) as TOTAVAILMB, cast((TOTAL_LOG_USED_TOP_KB/1024) as Integer) as TOTUSEDTOPMB FROM SYSIBMADM.LOG_UTILIZATION "); + if [ ! -z "$LOG_USED" ]; then + PCTUSED=$( echo "$LOG_USED" | awk '{ if(NF==4 && $2 ~/^[0-9]+$/) print int($1)}' ); + if [ ! 
-z "$PCTUSED" ]; then
+ if [ $PCTUSED -gt $TRANSACTION_LOG_THRESHOLD_PCT ]; then
+ log 1 "Will not kick off another reorg due to logfile PCTUSED above threshold of $TRANSACTION_LOG_THRESHOLD_PCT\n$LOG_USED"
+ continue
+ else
+ log 3 "$LOG_USED"
+ fi
+ fi
+ fi
+
+ ##
+ ## kick off another reorg
+ ## if rc=0 then ok, else we ABORT the OBJECT and don't try again
+ ##
+ log 3 ""
+
+ if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then
+
+ if [ $isTable -eq 1 ]; then
+
+ db2 -v "reorg table $TABSCHEMA.$TABNAME inplace allow write access"
+ rc=$?
+
+ elif [ $isIndex -eq 1 ]; then
+ db2 -v "reorg table $TABSCHEMA.$TABNAME index $INDSCHEMA.$INDNAME inplace allow write access"
+ rc=$?
+ fi
+
+ elif [ $IF_STATS -eq 3 ]; then
+
+ ## for offline we throw a job at db2 and wait a few seconds and check the output
+ ## output could be "reorg indexes all for table ...",
+ ## or SQL error
+ ## or 'DB20000I The REORG command completed successfully.'
+ ## not sure if there is a better way to do this
+ TMPLOG="/tmp/$TABSCHEMA.$TABNAME.tmp";
+ db2 -v "reorg indexes all for table $TABSCHEMA.$TABNAME allow write access" > $TMPLOG 2>&1 &
+ sleep 5;
+ cat $TMPLOG;
+ RC=$( grep '^SQL' $TMPLOG);
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ log 1 "Failed to kick off reorg\n$RC";
+ rc=1;
+ else
+ ## reorg could have finished then no need for the big sleep
+ RC=$( grep 'DB20000I The REORG command completed successfully.' $TMPLOG);
+ if [ $? -eq 0 ]; then
+ IF_STATS_BYPASS_SLEEP_INTERVAL_TIME=1;
+ fi
+ rc=0;
+
+ fi
+ rm -f $TMPLOG;
+
+ fi
+
+ if [ $rc -eq 0 ]; then
+ REORG_STATUS=KICKED_OFF;
+ else
+ REORG_STATUS=ABORTED;
+ fi
+ OBJECT_ARRAY[$ii]="$TABSCHEMA#$TABNAME#$INDSCHEMA#$INDNAME#$IID#$REORG_STATUS#$OBJECT_REORG_START#$OBJECT_REORG_END#$TABLEID#$TBSPACEID#$LOCK_COUNT#$OBJECT_REORG_TABLE_TYPE";
+ if [ $rc -ne 0 ]; then
+ let NUM_REORGS_ABORTED+=1;
+ continue
+ fi
+
+ ## add the table to the TABLE_IN_USE_ARRAY
+ addTABLE_TABLE_IN_USE_ARRAY "$TABSCHEMA.$TABNAME"
+ rc=$?
+ if [ $rc -eq 1 ]; then + log 1 "Failed to add table $TABSCHEMA.$TABNAME to TABLE_IN_USE_ARRAY"; + fi + let NUM_REORGS_KICKED_OFF+=1; + let NUM_REORGS_IN_PROGRESS+=1; + + continue; + + fi + + ## + ## ouput STATUS + ## + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + log 3 "$NUM_REORGS_IN_PROGRESS:$NUM_REORGS_KICKED_OFF:$NUM_REORGS_ABORTED:$NUM_REORGS_STOPPED:$NUM_REORGS_COMPLETED:$NUM_REORG_OBJECTS $TABSCHEMA.$TABNAME : $RC : $REORG_PERCENT_COMPLETE %" | tee -a $REORG_TABLE_INDEX_DEBUG + elif [ $IF_STATS -eq 3 ]; then + log 3 "$NUM_REORGS_IN_PROGRESS:$NUM_REORGS_KICKED_OFF:$NUM_REORGS_ABORTED:$NUM_REORGS_STOPPED:$NUM_REORGS_COMPLETED:$NUM_REORG_OBJECTS $TABSCHEMA.$TABNAME \n$DB2PD_REORG_INDEX_RECORD "| tee -a $REORG_TABLE_INDEX_DEBUG + + fi + + ## + ## if reorg timeout then issue a stop to current reorgs that are STARTED + ## no error checking for stopping a reorg + ## no need to update OBJECT array as it will be updated on next loop + ## + + if [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then + + if [ "$REORG_STATUS" == "STARTED" ] && [ $REORG_TIMEOUT_WINDOW_COMPLETED -eq 1 ] && [ $REORG_TIMEOUT_WINDOW_ACTION -eq 2 ] && [ $REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER -eq 1 ]; then + + + if [ $isTable -eq 1 ]; then + db2 -v "reorg table $TABSCHEMA.$TABNAME inplace stop" + rc=$? + elif [ $isIndex -eq 1 ]; then + db2 -v "reorg table $TABSCHEMA.$TABNAME index $INDSCHEMA.$INDNAME inplace stop" + rc=$? 
+ fi + + fi + + fi + + done ## for OBJECT_ARRAY[@] + + ## check if we are done with all OBJECTS + if [ $((NUM_REORGS_COMPLETED + NUM_REORGS_STOPPED + NUM_REORGS_ABORTED)) -ge $NUM_REORG_OBJECTS ]; then + log 3 "All reorgs are completed, stopped or aborted, $NUM_REORGS_COMPLETED:$NUM_REORGS_STOPPED:$NUM_REORGS_ABORTED:$NUM_REORG_OBJECTS" + break + fi + + ## wait some time + if [ $IF_STATS -eq 3 ] && [ $IF_STATS_BYPASS_SLEEP_INTERVAL_TIME -eq 1 ]; then + sleep 1; + IF_STATS_BYPASS_SLEEP_INTERVAL_TIME=0; + else + sleep $SLEEP_INTERVAL_TIME + fi + + done ## while true + +} + + +## init +if [ -f ${HOME}/sqllib/db2profile ]; then + . ${HOME}/sqllib/db2profile +fi + +## script already running ? +if [ $( ps -ef | grep $0 | grep -v grep | wc -l ) -gt 2 ]; then + echo "Warning: appears $0 already running" + echo "$( ps -ef | grep $0 | grep -v grep )"; + exit 1 +fi + +SCRIPT=$(basename $0) +SCRIPT_DIR=$(dirname $0) +WHOAMI=$(whoami) +HOSTNAME=$(hostname) + +## setup some temp work files +LOGDATE=$(date '+%Y%m%d'); +REORG_TABLE_INDEX_LOG=/tmp/${SCRIPT}.tmp.123.log +rm -f $REORG_TABLE_INDEX_LOG +REORG_TABLE_INDEX_DEBUG=/tmp/${SCRIPT}.debug +rm -f $REORG_TABLE_INDEX_DEBUG + +## control variables +LIST_ONLY=0 +LIST_FRAGMENTED_INDEXES=0 +LIST_VALID_TABLE_SIZES=0 +LIST_REORGCHK_TB_STATS_TABLES=0 +LIST_REORGCHK_IX_STATS_TABLES=0 +EXECUTE_TABLE_REORG=0 +IGNORE_TABLE_SIZE_THRESHOLD_MAX=20000; +IGNORE_TABLE_SIZE_THRESHOLD_MIN=10; +MAINTENANCE_TIMEOUT_WINDOW_MINUTES=240; +REORG_TIMEOUT_WINDOW_ACTION=2; +MAX_ASYNC_REORGS_ALLOWED=3; +TRANSACTION_LOG_THRESHOLD_PCT=90; +TB_STATS=0; +IF_STATS=0; +IX_STATS=0; +TRSI=0 +IGNORE_TABLES_EX=" and t0.tabname not like '%\_H' escape '\' and t1.volatile != 'C' " +IGNORE_TABLES=""; +REORG="*"; +SLEEP_INTERVAL_TIME=60; +REORGCHK_TB_IF_STATS_OPTION=""; +REORGCHK_TB_STATS=1; +REORGCHK_IF_STATS=3; + +## user check +if [ $WHOAMI == "root" ]; then + log 0 " This script should be not run as '$WHOAMI', but as instance owner." 
exit 1
+fi
+
+##
+## command line arguments
+##
+while [ $# -gt 0 ]
+do
+  case $1 in
+    -h|-H|-help|--help) UsageHelp; exit 1 ;;
+
+    -db) shift; [ ! -z "$1" ] && DB=$( echo "$1" | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;;
+    -s) shift; [ ! -z "$1" ] && SCHEMANAME_IN=$( echo "$1" | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;;
+    -t) shift; [ ! -z "$1" ] && TABLE_IN=$( echo "$1" | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;;
+    -tb_stats) REORGCHK_TB_IF_STATS_OPTION+=$REORGCHK_TB_STATS; TB_STATS=1 ;;
+    -ti) shift; [ ! -z "$1" ] && INDEX_IN=$( echo "$1" | tr '[a-z]' '[A-Z]' ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;;
+    -ix_stats) IX_STATS=2 ;;
+    -if_stats) REORGCHK_TB_IF_STATS_OPTION+=$REORGCHK_IF_STATS; IF_STATS=3 ;;
+    -ittx) shift; isNumeric $1 && { IGNORE_TABLE_SIZE_THRESHOLD_MAX=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+    -ittn) shift; isNumeric $1 && { IGNORE_TABLE_SIZE_THRESHOLD_MIN=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+
+    -l) LIST_ONLY=1 ;;
+    -lf) LIST_FRAGMENTED_INDEXES=1 ;;
+    -ls) LIST_VALID_TABLE_SIZES=1 ;;
+    -lt) LIST_REORGCHK_TB_STATS_TABLES=1 ;;
+    -li) LIST_REORGCHK_IX_STATS_TABLES=1 ;;
+
+    -window) shift; isNumeric $1 && { MAINTENANCE_TIMEOUT_WINDOW_MINUTES=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+    -twa) shift; isNumeric $1 && { REORG_TIMEOUT_WINDOW_ACTION=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+    -mar) shift; isNumeric $1 && { MAX_ASYNC_REORGS_ALLOWED=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+    -log) shift; isNumeric $1 && { TRANSACTION_LOG_THRESHOLD_PCT=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+    -tr) EXECUTE_TABLE_REORG=1 ;;
+
+    -trsi) TRSI=1 ;;
+    -reorg) shift; [ ! -z "$1" ] && REORG=$( echo "$1" ) || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;;
+
+    -ignore) shift; [ ! -z "$1" ] && { IGNORE_TABLES="$1"; } || { echo "Error: Must enter an argument for this option"; UsageHelp; exit 1 ; } ;;
+
+    -sleep) shift; isNumeric $1 && { SLEEP_INTERVAL_TIME=$1; } || { echo "Error: Must enter a numeric argument for this option"; UsageHelp; exit 1 ; } ;;
+
+    (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
+    (*) break;;
+  esac
+
+  shift
+
+done
+
+##
+## some verification
+##
+if [ -z "$SCHEMANAME_IN" ]; then
+  log 0 "must enter a schemaname"
+  exit 1
+fi
+
+CHECK=1
+if [ $CHECK -eq 0 ]; then
+  rc=0
+  if [ $TB_STATS -eq 1 ] && [ $IX_STATS -eq 2 -o $IF_STATS -eq 3 ]; then
+    rc=1;
+  elif [ $IX_STATS -eq 2 ] && [ $TB_STATS -eq 1 -o $IF_STATS -eq 3 ]; then
+    rc=1;
+  elif [ $IF_STATS -eq 3 ] && [ $TB_STATS -eq 1 -o $IX_STATS -eq 2 ]; then
+    rc=1;
+  fi
+  if [ $rc -eq 1 ]; then
+    log 0 "can't define more than one of -tb_stats, -ix_stats or -if_stats"
+    exit 1
+  fi
+
+fi ## CHECK
+
+if [ $TB_STATS -eq 1 ] && [ ! -z "$INDEX_IN" ]; then
+  log 0 "can't define -ti with -tb_stats"
+  exit 1
+elif [ $IX_STATS -eq 2 ] && [ ! -z "$TABLE_IN" ]; then
+  log 0 "can't define -t with -ix_stats"
+  exit 1
+elif [ $IF_STATS -eq 3 ] && [ ! 
-z "$INDEX_IN" ]; then + log 0 "can't define -ti with -if_stats" + exit 1 +fi + +if [ $TRANSACTION_LOG_THRESHOLD_PCT -gt 99 ]; then + log 0 "-log option should be less than 100, TRANSACTION_LOG_THRESHOLD_PCT=$TRANSACTION_LOG_THRESHOLD_PCT" + exit 1 +fi +if [ $IGNORE_TABLE_SIZE_THRESHOLD_MIN -ge $IGNORE_TABLE_SIZE_THRESHOLD_MAX ]; then + log 0 "option -ittx should be greater than option -ittn" + exit 1 +fi + +## override some defaults for offline reorgs +#if [ $IF_STATS -eq 3 ]; then +# MAX_ASYNC_REORGS_ALLOWED=1; +# REORG_TIMEOUT_WINDOW_ACTION=1; +# ## make SLEEP_INTERVAL_TIME 1/3 for IF_STATS +# SLEEP_INTERVAL_TIME=$( echo $SLEEP_INTERVAL_TIME | awk '{ print $1/3 }'); +#fi + +## fixup REORG filter string for grep +REORG=$( echo "$REORG" | sed 's/*/\\*/g' | sed 's/-/\\-/g'); + +## need to transform to seconds - easier to work with +## 3/4 time is for reorgs , 1/4 for runstats +MAINTENANCE_TIMEOUT_WINDOW_SECONDS=$(( 60 * MAINTENANCE_TIMEOUT_WINDOW_MINUTES )) +REORG_TIMEOUT_WINDOW_SECONDS=$( echo $MAINTENANCE_TIMEOUT_WINDOW_SECONDS | awk '{ print int(0.75*$1) }'); +RUNSTATS_TIMEOUT_WINDOW_SECONDS=$( echo $MAINTENANCE_TIMEOUT_WINDOW_SECONDS | awk '{ print int(0.25*$1) }'); + +# echo "$MAINTENANCE_TIMEOUT_WINDOW_SECONDS $REORG_TIMEOUT_WINDOW_SECONDS $RUNSTATS_TIMEOUT_WINDOW_SECONDS" + +## +## main +## +log 3 "Starting $0 at $(date) on $HOSTNAME" + +DBNAMES=$( db2 list db directory | grep -E "alias|Indirect" | grep -B 1 Indirect | grep alias | awk '{print $4}' | sort ) + +## +## loops for all dbs +## +for DBNAME in $DBNAMES +do + + ## just process the one db + if [ ! 
-z "$DB" ] && [ "$DB" != "$DBNAME" ] ; then
+    continue
+  fi
+
+  ## can't run script on a STANDBY db
+  ROLE=$(db2 "get db cfg for $DBNAME" | grep 'HADR database role' | cut -d '=' -f2 | sed 's/ *//g')
+  if [ -z "$ROLE" ] || [ "$ROLE" == "" ]; then
+    log 1 " Can't determine hadr database role from 'db2 get db cfg for $DBNAME'"
+    continue
+  elif [ "$ROLE" == "STANDBY" ]; then
+    log 1 " Can't run script '${0}' for $DBNAME with hadr database role '$ROLE'"
+    continue
+  fi
+
+  log 3 "DB=$DBNAME ..."
+
+  db2 connect to $DBNAME >> /dev/null 2>&1
+  rc=$?
+  if [ $rc -ne 0 ]; then
+    log 0 " can't connect to $DBNAME"
+    continue
+  fi
+
+  if [ $TRSI -eq 1 ]; then
+    TRSI
+    continue
+
+  elif [ $LIST_VALID_TABLE_SIZES -eq 1 ]; then
+    getValidTableSizes
+    log 3 "The following $NUM_VALID_TABLES are valid table sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$VALID_TABLE_SIZES_RAW_DATA"
+    continue
+
+  elif [ $LIST_FRAGMENTED_INDEXES -eq 1 ]; then
+    getValidFragmentedIndexes
+    VALID_FRAGMENTED_INDEXES_HEADER="TABSCHEMA TABNAME INDSCHEMA INDNAME INDCARD STATS_TIME LAST_USED NLEAF SEQUENTIAL_PAGES";
+    log 3 "The following $NUM_VALID_FRAGMENTED_INDEXES are fragmented indexes based on tables sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$VALID_FRAGMENTED_INDEXES_HEADER\n$VALID_FRAGMENTED_INDEXES_RAW_DATA"
+    continue;
+  elif [ $LIST_REORGCHK_TB_STATS_TABLES -eq 1 ]; then
+    getValidTablesToReorg
+    REORGCHK_TB_STATS_HEADER="TABLE_SCHEMA TABLE_NAME CARD OVERFLOW NPAGES FPAGES ACTIVE_BLOCKS TSIZE F1 F2 F3 REORG";
+    log 3 "The following $NUM_VALID_TABLES_TO_REORG are results from REORGCHK_TB_STATS based on tables sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$REORGCHK_TB_STATS_HEADER\n$VALID_TABLES_TO_REORG_RAW_DATA"
+    continue;
+
+  elif [ $LIST_REORGCHK_IX_STATS_TABLES -eq 1 ]; then
+    getValidIndexesToReorg
+    
REORGCHK_IX_STAT_HEADER="TABLE_SCHEMA TABLE_NAME INDEX_SCHEMA INDEX_NAME INDCARD NLEAF NUM_EMPTY_LEAFS NLEVELS NUMRIDS_DELETED FULLKEYCARD LEAF_RECSIZE NONLEAF_RECSIZE LEAF_PAGE_OVERHEAD NONLEAF_PAGE_OVERHEAD PCT_PAGES_SAVED F4 F5 F6 F7 F8 REORG"; + log 3 "The following $NUM_VALID_INDEXES_TO_REORG are results from REORGCHK_IX_STATS based on tables sizes within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB\n$REORGCHK_IX_STAT_HEADER\n$VALID_INDEXES_TO_REORG_RAW_DATA" + continue; + + + fi + + + INPLACE=1 + if [ $INPLACE -eq 1 ]; then + + log 3 " SCHEMA: $SCHEMANAME_IN" + log 3 " TABLES: $TABLE_IN" + log 3 "INDEXES: $INDEX_IN" + + ## + ## input table(s) verification + ## verify input table exist + ## and table is within size limits + ## create the OBJECT_ARRAY that holds the relevant table information + ## + if [ ! -z "$TABLE_IN" ]; then + + TABLE_IN=$( echo "$TABLE_IN" | tr ' ' '\n' ); + for TABNAME in $TABLE_IN + do + + ## make sure table exists + RC=$( db2 -x "select tabname from syscat.tables where tabname = '$TABNAME' and tabschema = '$SCHEMANAME_IN' and type = 'T'"); + rc=$? + if [ $rc -ne 0 ]; then + log 0 "input command line table '$TABNAME' does not exist or is invalid" + exit 1 + fi + + isTableWithinSizeLimit $SCHEMANAME_IN $TABNAME + rc=$? 
+ if [ $rc -ne 0 ]; then + log 0 "Table $TABNAME is not within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB" + exit 1 + fi + + done + + if [ $TB_STATS -eq 1 ]; then + createTableOBJECT_ARRAY "$TABLE_IN" $TB_STATS + elif [ $IF_STATS -eq 3 ]; then + createTableOBJECT_ARRAY "$TABLE_IN" $IF_STATS + fi + + elif [ ${#REORGCHK_TB_IF_STATS_OPTION} -eq 2 ]; then + for REORG_TYPE in 0 1 + do + if [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "1" ]; then + getValidTablesToReorg + TABLE_IN="$VALID_TABLES_TO_REORG"; + createTableOBJECT_ARRAY "$TABLE_IN" $TB_STATS + elif [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "3" ]; then + getValidFragmentedIndexes + TABLE_IN="$VALID_FRAGMENTED_INDEXES" + createTableOBJECT_ARRAY "$TABLE_IN" $IF_STATS + fi + done + + elif [ $TB_STATS -eq 1 ]; then + getValidTablesToReorg + TABLE_IN="$VALID_TABLES_TO_REORG"; + createTableOBJECT_ARRAY "$TABLE_IN" $TB_STATS + elif [ $IF_STATS -eq 3 ]; then + getValidFragmentedIndexes + TABLE_IN="$VALID_FRAGMENTED_INDEXES" + createTableOBJECT_ARRAY "$TABLE_IN" $IF_STATS + fi + + ## + ## input indexes verification + ## verify input indexes exist + ## + if [ ! -z "$INDEX_IN" ]; then + INDEX_IN=$( echo "$INDEX_IN" | tr ' ' '\n' ); + for INDEX in $INDEX_IN + do + ## make sure index exists, especially those input on command line + TABSCHEMA=$( echo $INDEX | cut -d. -f1); + TABNAME=$( echo $INDEX | cut -d. -f2); + INDSCHEMA=$( echo $INDEX | cut -d. -f3); + INDNAME=$( echo $INDEX | cut -d. -f4); + RC=$( db2 -x "select indname from syscat.indexes where tabschema = '$TABSCHEMA' and tabname = '$TABNAME' and indschema = '$INDSCHEMA' and indname = '$INDNAME'"); + rc=$? + if [ $rc -ne 0 ]; then + log 0 " input command line index '$INDEX' does not exist" + exit 1 + fi + + isTableWithinSizeLimit $TABSCHEMA $TABNAME + rc=$? 
+ if [ $rc -ne 0 ]; then + log 0 "Table $TABNAME is not within the range $IGNORE_TABLE_SIZE_THRESHOLD_MIN MB and $IGNORE_TABLE_SIZE_THRESHOLD_MAX MB" + exit 1 + fi + + done + + createIndexOBJECT_ARRAY "$INDEX_IN" $IX_STATS; + ## + ## + elif [ $IX_STATS -eq 2 ]; then + getValidIndexesToReorg + INDEX_IN="$VALID_INDEXES_TO_REORG" + createIndexOBJECT_ARRAY "$INDEX_IN" $IX_STATS; + fi + + ## get the NUMBER of tables per reorg table type + getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY $TB_STATS; OBJECT_NUM_TB_STATS=$?; + getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY $IX_STATS; OBJECT_NUM_IX_STATS=$?; + getNUM_OBJECT_REORG_TABLE_TYPE_OBJECT_ARRAY $IF_STATS; OBJECT_NUM_IF_STATS=$?; + + ## just list out the OBJECT_ARRAY and exit + if [ $LIST_ONLY -eq 1 ]; then + + listOBJECT_ARRAY; + exit 1 + fi + + + if [ $EXECUTE_TABLE_REORG -eq 1 ]; then + + + ## + ## this is the main list of what we are going to reorg + ## + echo "" + + listOBJECT_ARRAY; + +# exit 1 + + ## + ## setup some control variables for the main loop + ## + ## REORG_STATUS COMPLETED PAUSED STARTED STOPPED TRUNCATE + ## + MAINTENANCE_TIMEOUT_WINDOW_START_TIME_SECONDS=$( date '+%s' ); + REORG_TIMEOUT_WINDOW_START_TIME_SECONDS=$( date '+%s' ); + WINDOW_START_TIME_DB2=$( date '+%Y-%m-%d-%H.%M.%S' ); + IF_STATS_WINDOW_START_TIME_DB2=$( date '+%s' ); + IF_STATS_BYPASS_SLEEP_INTERVAL_TIME=0; + NUM_REORG_OBJECTS="${#OBJECT_ARRAY[@]}"; +# NUM_REORGS_IN_PROGRESS=0; +# NUM_REORGS_KICKED_OFF=0; +# NUM_REORGS_COMPLETED=0; +# NUM_REORGS_STOPPED=0; +# NUM_REORGS_ABORTED=0; + REORG_TIMEOUT_OVERFLOW_VALVE=300; + REORG_TIMEOUT_WINDOW_COMPLETED=0; + REORG_TIMEOUT_WINDOW_COMPLETED_COUNTER=0; + initTABLE_IN_USE_ARRAY $MAX_ASYNC_REORGS_ALLOWED + + ## multi table reorg option + ## online table reorg and offline index reorgs have different options + MAX_ASYNC_REORGS_ALLOWED_ORG=$MAX_ASYNC_REORGS_ALLOWED; + REORG_TIMEOUT_WINDOW_ACTION_ORG=$REORG_TIMEOUT_WINDOW_ACTION; + SLEEP_INTERVAL_TIME_ORG=$SLEEP_INTERVAL_TIME; + + ## override some 
defaults for offline reorgs + #if [ $IF_STATS -eq 3 ]; then + # MAX_ASYNC_REORGS_ALLOWED=1; + # REORG_TIMEOUT_WINDOW_ACTION=1; + # ## make SLEEP_INTERVAL_TIME 1/3 for IF_STATS + # SLEEP_INTERVAL_TIME=$( echo $SLEEP_INTERVAL_TIME | awk '{ print $1/3 }'); + #fi + + echo "" + log 3 "Starting reorg of ..." + + if [ ${#REORGCHK_TB_IF_STATS_OPTION} -eq 2 ]; then + for REORG_TYPE in 0 1 + do + + if [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "1" ]; then + TB_STATS=1; + IF_STATS=0; + NUM_REORG_OBJECTS=$OBJECT_NUM_TB_STATS; + REORG_TABLE_TYPE=$TB_STATS; + MAX_ASYNC_REORGS_ALLOWED=$MAX_ASYNC_REORGS_ALLOWED_ORG; + REORG_TIMEOUT_WINDOW_ACTION=$REORG_TIMEOUT_WINDOW_ACTION_ORG; + SLEEP_INTERVAL_TIME=$SLEEP_INTERVAL_TIME_ORG; + + elif [ "${REORGCHK_TB_IF_STATS_OPTION:$REORG_TYPE:1}" == "3" ]; then + TB_STATS=0; + IF_STATS=3; + NUM_REORG_OBJECTS=$OBJECT_NUM_IF_STATS; + REORG_TABLE_TYPE=$IF_STATS; + MAX_ASYNC_REORGS_ALLOWED=1; + REORG_TIMEOUT_WINDOW_ACTION=1; + SLEEP_INTERVAL_TIME=$( echo $SLEEP_INTERVAL_TIME_ORG | awk '{ print $1/3 }'); + fi + + reorgTables + done + else + if [ $TB_STATS -eq 1 ]; then + REORG_TABLE_TYPE=$TB_STATS; + elif [ $IX_STATS -eq 2 ]; then + REORG_TABLE_TYPE=$IX_STATS; + elif [ $IF_STATS -eq 3 ]; then + REORG_TABLE_TYPE=$IF_STATS; + fi + reorgTables + fi + + ## list current state of OBJECT_ARRAY + listOBJECT_ARRAY; + + ## + ## now do runstats + ## + log 3 "Starting runstats of ..." 
+ RUNSTATS_TIMEOUT_WINDOW_START_TIME_SECONDS=$( date '+%s' ); + + for((ii=0; ii< ${#OBJECT_ARRAY[@]}; ii++)) + do + + ## check runstats window maintenance time + MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS=$( date '+%s' ); + DIFF=$(( MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS - REORG_TIMEOUT_WINDOW_START_TIME_SECONDS )); + if [ $DIFF -ge $(( REORG_TIMEOUT_WINDOW_SECONDS + RUNSTATS_TIMEOUT_WINDOW_SECONDS)) ]; then + log 3 "$REORG_TIMEOUT_WINDOW_START_TIME_SECONDS +$MAINTENANCE_TIMEOUT_WINDOW_TIME_NOW_SECONDS $REORG_TIMEOUT_WINDOW_SECONDS $RUNSTATS_TIMEOUT_WINDOW_SECONDS $DIFF" + log 3 "Runstats window ending, runstats window time exceeded" + break + fi + + ## get table info + TABSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f1 ); + TABNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f2 ); + INDSCHEMA=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f3 ); + INDNAME=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f4 ); + IID=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f5 ); + OBJECT_REORG_STATUS=$( echo ${OBJECT_ARRAY[$ii]} | cut -d# -f6 ); + + ## only do a runstats if table/index has completed + ## verify stats time so we dont kick off another runstats on the same table + if [ "$OBJECT_REORG_STATUS" == "COMPLETED" ]; then + STATS_TIME=$( db2 -x "select stats_time from syscat.tables where tabschema='$TABSCHEMA' and tabname='$TABNAME' and stats_time < TIMESTAMP('$WINDOW_START_TIME_DB2') " ); + rc=$? 
+ if [ $rc -eq 0 ]; then + log 3 "Starting runstats on $TABSCHEMA.$TABNAME" + # db2 -v "runstats on table $TABSCHEMA.$TABNAME WITH DISTRIBUTION ON ALL COLUMNS AND SAMPLED DETAILED INDEXES ALL ALLOW WRITE ACCESS"; + db2 -v "runstats on table $TABSCHEMA.$TABNAME WITH DISTRIBUTION ON KEY COLUMNS AND SAMPLED DETAILED INDEXES ALL ALLOW WRITE ACCESS UTIL_IMPACT_PRIORITY 50"; + log 3 "Finished runstats on $TABSCHEMA.$TABNAME" + fi + fi + + done + + fi ## EXECUTE_TABLE_REORG + + + fi ## INPLACE + + +done ## DBNAMES + +## +## cleanup +## + +log 3 "Completed $0 at $(date)" + +exit 0 diff --git a/instance-applications/120-ibm-db2u-database/files/runstats_rebind.sh b/instance-applications/120-ibm-db2u-database/files/runstats_rebind.sh new file mode 100755 index 000000000..7415d6b4a --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/files/runstats_rebind.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# *************************************************************************** +# Author: Fu Le Qing (Roking) +# Email: leqingfu@cn.ibm.com +# Date: 10-31-2018 +# +# Description: This script updates statistics of tables, +# associated indexes in the database, and sends an email +# to a specified email list. +# +# ******** THIS NEEDS TO BE RUN AS INSTANCE OWNER. ************** +# +# Revision history: +# 10-31-2018 Fu Le Qing (Roking) +# Original version +# 11-16-2018 Fu Le Qing (Roking) +# Skip the tables which are ongoing with reorg +# 09-08-2023 Fu Le Qing (Roking) +# Update for MAS +# +# *************************************************************************** +# +# *************************************************************************** +if [ -f /mnt/backup/bin/.PROPS ] +then + . 
/mnt/backup/bin/.PROPS
+  DOW=`date | awk '{print $1}'`
+  if [ ${DOW} != ${DAYOFFULL} ]
+  then
+    exit 0
+  fi
+fi
+
+instance=`whoami`
+instance_home=`/usr/local/bin/db2greg -dump | grep -ae "I," | grep -v "/das," | grep "${instance}" | awk -F ',' '{print $5}'| sed 's/\/sqllib//'`
+
+pidfile="$instance_home/.`basename ${0}`.pid"
+if [ -e ${pidfile} ] && kill -0 `cat ${pidfile}` 2>/dev/null
+then
+  exit 0
+fi
+
+echo $$ > ${pidfile}
+trap "rm -f ${pidfile}; exit" SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM EXIT
+
+if [ ! -f "$instance_home/sqllib/db2profile" ]
+then
+  echo "ERROR - $instance_home/sqllib/db2profile not found"
+  exit 1
+else
+  . $instance_home/sqllib/db2profile
+fi
+
+RUNSTATS_TMP_FILE="$instance_home/.runstats.sql"
+REBIND_TMP_FILE="$instance_home/.rebind.sql"
+
+mkdir -p $instance_home/maintenance/logs
+DATESTAMP=`date "+%Y-%m-%d-%H.%M.%S"`
+
+for db in `db2 list db directory | grep -B 5 Indirect | grep "Database name" | cut -d= -f2`
+do
+  role=`db2 get db cfg for ${db} | grep "HADR database role" | cut -d= -f2 |sed 's/ //g'`
+  if [ "$role" != "STANDBY" ]; then
+    if [ -f $RUNSTATS_TMP_FILE ]
+    then
+      rm $RUNSTATS_TMP_FILE
+    fi
+    if [ -f $REBIND_TMP_FILE ]
+    then
+      rm $REBIND_TMP_FILE
+    fi
+    db2 connect to ${db} | tee $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP}
+    if [ $? 
-eq 0 ]; then + db2 -x "select 'RUNSTATS ON TABLE \"' ||rtrim(tab.tabschema)||'\".\"'|| tab.tabname ||'\" WITH DISTRIBUTION ON KEY COLUMNS AND DETAILED INDEXES ALL ALLOW WRITE ACCESS;' + from syscat.tables tab left join sysibmadm.SNAPTAB_REORG reg on tab.tabschema=reg.TABSCHEMA and tab.tabname=reg.TABNAME and reg.REORG_STATUS not in ('COMPLETED','STOPPED') + where tab.type='T' and reg.tabname is null" > $RUNSTATS_TMP_FILE + #db2 -x "select 'rebind package \"' ||rtrim(PKGSCHEMA)||'\".\"'|| PKGNAME ||'\";' from syscat.packages where PKGSCHEMA not in ('NULLID','NULLIDR1','NULLIDRA','SYSIBMADM','SYSIBMINTERNAL') and PKGSCHEMA not like 'NULL%' " > $REBIND_TMP_FILE + echo "Begin processing of runstats @ $DATESTAMP ..." | tee -a $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP} + db2 -txvf $RUNSTATS_TMP_FILE | tee -a $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP} + echo "End processing of runstats @ $DATESTAMP" | tee -a $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP} + + #echo "Begin processing of rebind @ $DATESTAMP ..." 
| tee -a $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP} + #db2 -txvf $REBIND_TMP_FILE | tee -a $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP} + #echo "End processing of rebind @ $DATESTAMP" | tee -a $instance_home/maintenance/logs/runstats_${db}_${DATESTAMP} + rm $RUNSTATS_TMP_FILE + #rm $REBIND_TMP_FILE + db2 terminate + fi + fi +done diff --git a/instance-applications/120-ibm-db2u-database/templates/03-db2uinstance.yaml b/instance-applications/120-ibm-db2u-database/templates/03-db2uinstance.yaml index 7fd0b303b..a6d410134 100644 --- a/instance-applications/120-ibm-db2u-database/templates/03-db2uinstance.yaml +++ b/instance-applications/120-ibm-db2u-database/templates/03-db2uinstance.yaml @@ -150,6 +150,7 @@ spec: storage: "{{ .Values.db2_audit_logs_storage_size }}" storageClassName: "{{ .Values.db2_audit_logs_storage_class }}" {{- end }} +{{- if .Values.db2_archivelogs_storage_class }} - name: archivelogs type: create spec: @@ -159,3 +160,4 @@ spec: resources: requests: storage: "{{ .Values.db2_archivelogs_storage_size }}" +{{- end }} \ No newline at end of file diff --git a/instance-applications/120-ibm-db2u-database/templates/04-db2u-Backup_Cron.yaml b/instance-applications/120-ibm-db2u-database/templates/04-db2u-Backup_Cron.yaml new file mode 100644 index 000000000..b08370512 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/04-db2u-Backup_Cron.yaml @@ -0,0 +1,36 @@ +{{- if .Values.db2_backup_bucket_name }} +#apiVersion: batch/v1beta1 +kind: CronJob +apiVersion: batch/v1 +metadata: + name: "db2-backup-job-v1-{{ .Values.db2_instance_name }}" + namespace: "{{ .Values.db2_namespace }}" + labels: + parent: cronjob + cosBucket: "{{ .Values.db2_backup_bucket_name }}" + db2pod: "c-{{ .Values.db2_instance_name }}-db2u-0" +spec: + schedule: '0 2 * * *' + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + metadata: + labels: + parent: cronjob + spec: + containers: + - name: "db2-backup-job-v1-{{ 
.Values.db2_instance_name }}" + image: quay.io/ibmmas/cli:latest + command: + - oc + - rsh + - "c-{{ .Values.db2_instance_name }}-db2u-0" + - /mnt/backup/bin/cronRunBKP.sh + - "{{ .Values.db2_backup_bucket_name }}" + imagePullPolicy: IfNotPresent + restartPolicy: OnFailure + serviceAccountName: "account-{{ .Values.db2_namespace }}-{{ .Values.db2_instance_name }}" + successfulJobsHistoryLimit: 30 + failedJobsHistoryLimit: 10 +{{- end }} diff --git a/instance-applications/120-ibm-db2u-database/templates/05-db2u-scripts-ConfigMap.yaml b/instance-applications/120-ibm-db2u-database/templates/05-db2u-scripts-ConfigMap.yaml new file mode 100644 index 000000000..a6e98f779 --- /dev/null +++ b/instance-applications/120-ibm-db2u-database/templates/05-db2u-scripts-ConfigMap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.db2_backup_bucket_name }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: db2u-scripts-{{ .Values.db2_instance_name }}-configmap + namespace: "{{ .Values.db2_namespace }}" +data: +{{ (.Files.Glob "files/*").AsConfig | indent 2 }} +{{- end }} \ No newline at end of file diff --git a/instance-applications/120-ibm-db2u-database/templates/04-tlsroute.yaml b/instance-applications/120-ibm-db2u-database/templates/06-tlsroute.yaml similarity index 100% rename from instance-applications/120-ibm-db2u-database/templates/04-tlsroute.yaml rename to instance-applications/120-ibm-db2u-database/templates/06-tlsroute.yaml diff --git a/instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml b/instance-applications/120-ibm-db2u-database/templates/07-postsync-setup-db2_Job.yaml similarity index 65% rename from instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml rename to instance-applications/120-ibm-db2u-database/templates/07-postsync-setup-db2_Job.yaml index e798d1974..ff372831b 100644 --- a/instance-applications/120-ibm-db2u-database/templates/05-postsync-setup-db2_Job.yaml +++ 
b/instance-applications/120-ibm-db2u-database/templates/07-postsync-setup-db2_Job.yaml @@ -13,6 +13,11 @@ metadata: data: aws_access_key_id: {{ .Values.sm_aws_access_key_id | b64enc }} aws_secret_access_key: {{ .Values.sm_aws_secret_access_key | b64enc }} +{{- if .Values.db2_backup_bucket_name }} + db2_backup_bucket_access_key: {{ .Values.db2_backup_bucket_access_key | b64enc }} + db2_backup_bucket_secret_key: {{ .Values.db2_backup_bucket_secret_key | b64enc }} + icd_auth_key: {{ .Values.db2_backup_icd_auth_key | b64enc }} +{{- end }} type: Opaque --- @@ -102,7 +107,7 @@ kind: Job metadata: # Suffix the Job name with a hash of all chart values # This is to ensure that ArgoCD will delete and recreate the job if (and only if) anything changes in the DB2 config - name: postsync-setup-db2-{{ .Values.db2_instance_name }}-v4-{{ omit .Values "junitreporter" | toYaml | adler32sum }} + name: postsync-setup-db2-{{ .Values.db2_instance_name }}-v6-{{ omit .Values "junitreporter" | toYaml | adler32sum }} namespace: "{{ .Values.db2_namespace }}" annotations: argocd.argoproj.io/sync-wave: "129" @@ -151,12 +156,26 @@ spec: value: "{{ .Values.db2_dbname }}" - name: DB2_TLS_VERSION value: "{{ .Values.db2_tls_version }}" + - name: DB2_INSTANCE_HOME_PATH + value: "{{ .Values.db2_instance_home_path }}" +{{- if .Values.db2_backup_bucket_name }} + - name: BUCKET_NAME + value: "{{ .Values.db2_backup_bucket_name }}" + - name: COS_SERVER + value: "{{ .Values.db2_backup_bucket_endpoint }}" + - name: SLACKURL + value: "{{ .Values.db2_backup_notify_slack_url }}" +{{- end }} volumeMounts: - name: aws mountPath: /etc/mas/creds/aws - name: db2u-certificate mountPath: /etc/mas/creds/db2u-certificate +{{- if .Values.db2_backup_bucket_name }} + - name: db2-scripts + mountPath: "/tmp/db2-scripts/" +{{- end }} command: - /bin/sh - -c @@ -207,6 +226,12 @@ spec: RETRIES=${1:-5} RETRY_DELAY_SECONDS=${2:-30} + mas-devops-db2-validate-config --mas-instance-id ${MAS_INSTANCE_ID} --mas-app-id ${MAS_APP_ID} 
--log-level DEBUG || rc=$? + if [[ "$rc" == "0" ]]; then + echo "... db2 config already matches expected config, returning without calling apply-db2cfg-settings" + return 0 + fi + for (( c=1; c<="${RETRIES}"; c++ )); do echo "" echo "... attempt ${c} of ${RETRIES}" @@ -249,6 +274,12 @@ spec: export SM_AWS_ACCESS_KEY_ID=$(cat /etc/mas/creds/aws/aws_access_key_id) export SM_AWS_SECRET_ACCESS_KEY=$(cat /etc/mas/creds/aws/aws_secret_access_key) export SM_AWS_REGION=${REGION_ID} + + if [[ -n $BUCKET_NAME ]]; then + export DB2_BACKUP_BUCKET_ACCESS_KEY=$(cat /etc/mas/creds/aws/db2_backup_bucket_access_key) + export DB2_BACKUP_BUCKET_SECRET_KEY=$(cat /etc/mas/creds/aws/db2_backup_bucket_secret_key) + export ICD_AUTH_KEY=$(cat /etc/mas/creds/aws/icd_auth_key) + fi echo "" echo "================================================================================" @@ -268,6 +299,61 @@ spec: echo "================================================================================" wait_for_resource "svc" "c-${DB2_INSTANCE_NAME}-db2u-engn-svc" "${DB2_NAMESPACE}" + echo "" + echo "Creating /mnt/backup/MIRRORLOGPATH in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/MIRRORLOGPATH" db2inst1 || exit $? + + echo "" + echo "Creating /mnt/backup/staging in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/staging" db2inst1 || exit $? + + echo "" + echo "Creating /mnt/backup/bin in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/bin" db2inst1 || exit $? 
+ + echo "" + echo "Getting Instance owner and home directory in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + INSTOWNER='db2inst1' + echo "Instance owner is ${INSTOWNER}" + INSTHOME=`oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "cat /etc/passwd | grep ${INSTOWNER} | cut -d: -f6" db2inst1` + echo "Instance home directory is ${INSTHOME}" + + echo "" + echo "Creating ${INSTHOME}/bin in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p ${INSTHOME}/bin" db2inst1 || exit $? + + echo "" + echo "Creating ${INSTHOME}/bin/ITCS104 in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p ${INSTHOME}/bin/ITCS104" db2inst1 || exit $? + + echo "" + echo "Creating ${INSTHOME}/Managed in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p ${INSTHOME}/Managed" db2inst1 || exit $? + + echo "" + echo "Creating ${INSTHOME}/maintenance in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p ${INSTHOME}/maintenance" db2inst1 || exit $? + + if [[ -n $BUCKET_NAME ]]; then + echo "" + echo "Copying all the required DB2 scripts in c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /tmp/db2-scripts/" db2inst1 || exit $? 
+ cd /tmp + zip -r db2-scripts.zip db2-scripts/ + oc cp /tmp/db2-scripts.zip ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0:/tmp/db2-scripts.zip -c db2u || exit $? + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "cd /tmp; unzip -o db2-scripts.zip; chmod -R +x /tmp/db2-scripts" db2inst1 || exit $? + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "/tmp/db2-scripts/CopyDBScripts.sh | tee /tmp/copydbscripts.log" db2inst1 || exit $? + fi + if [[ "$MAS_APP_ID" == "manage" ]]; then echo "" echo "================================================================================" @@ -368,6 +454,14 @@ spec: if [[ "\$maxd_bpf" -eq "0" && "\$maxi_bpf" -eq "0" && "\$maxtmp_bpf" -eq "0" && "\$maxd_tbsp" -eq "0" && "\$maxi_tbsp" -eq "0" && "\$maxtmp_tbsp" -eq "0" && -f "\$TBSP_SQL" ]]; then db2 -tvf \$TBSP_SQL + if grep -qw 'CREATE' \$TBSP_SQL; then + db2 connect reset + db2stop force + ipclean + db2start + db2 connect to ${DB2_DBNAME} + sleep 30 + fi rm \$TBSP_SQL else echo "Error detected." @@ -388,19 +482,25 @@ spec: echo "--------------------------------------------------------------------------------" oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${SETUPDB_SH_PATH} | tee /tmp/setupdb.log" db2inst1 || exit $? - echo "" - echo "Creating /mnt/backup/MIRRORLOGPATH in c-${DB2_INSTANCE_NAME}-db2u-0" - echo "--------------------------------------------------------------------------------" - oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/MIRRORLOGPATH" db2inst1 || exit $? - - echo "" - echo "Creating /mnt/backup/staging in c-${DB2_INSTANCE_NAME}-db2u-0" - echo "--------------------------------------------------------------------------------" - oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "mkdir -p /mnt/backup/staging" db2inst1 || exit $? 
+ if [[ -n $BUCKET_NAME ]]; then + CREATE_ROLES_SH_PATH="${DB2_INSTANCE_HOME_PATH}/bin/CreateRoles.sh" + echo "" + echo "Executing ${CREATE_ROLES_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "cd ${DB2_INSTANCE_HOME_PATH}/bin; ${CREATE_ROLES_SH_PATH} | tee /tmp/createroles.log" db2inst1 || exit $? + fi fi # [[ "$MAS_APP_ID" == "manage" ]] + if [[ -n $BUCKET_NAME ]]; then + POST_BACKFLOW_SH_PATH="${DB2_INSTANCE_HOME_PATH}/Managed/PostBackFlow.sh" + echo "" + echo "Executing ${POST_BACKFLOW_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${POST_BACKFLOW_SH_PATH} | tee /tmp/postbackflowscriptcreation.log" db2inst1 || exit $? + fi + echo "" echo "================================================================================" echo "Calling apply-db2cfg-settings.sh file on c-${DB2_INSTANCE_NAME}-db2u-0" @@ -537,6 +637,86 @@ spec: TAGS="[{\"Key\": \"source\", \"Value\": \"postsync-setup-db2\"}, {\"Key\": \"account\", \"Value\": \"${ACCOUNT_ID}\"}, {\"Key\": \"cluster\", \"Value\": \"${CLUSTER_ID}\"}]" sm_update_secret ${DB2_CONFIG_SECRET} "{ \"jdbc_connection_url\": \"${JDBC_CONNECTION_URL}\", \"jdbc_instance_name\": \"${DB2_INSTANCE_NAME}\", \"ca_b64\": \"${DB2_CA_PEM}\", \"db2_dbname\": \"${DB2_DBNAME}\", \"db2_namespace\": \"${DB2_NAMESPACE}\" }" "${TAGS}" || exit $? 
+ if [[ -n $BUCKET_NAME ]]; then + echo "" + echo "================================================================================" + echo "Invoke Fix invalid objects for Manage" + echo "================================================================================" + + FIX_INVALID_OBJECTS_SH_PATH="${DB2_INSTANCE_HOME_PATH}/bin/ITCS104/FixInvalidObjects.sh" + + echo "" + echo "Executing ${FIX_INVALID_OBJECTS_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${FIX_INVALID_OBJECTS_SH_PATH} | tee /tmp/fixInvalidObjects.log" db2inst1 || exit $? + + SQL_FILES_AVAILABLE=`oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "find ${INSTHOME} -name \"ALL_*.sql\"" db2inst1 || exit $?` + + if [[ -z $SQL_FILES_AVAILABLE ]]; then + + echo "" + echo "================================================================================" + echo "Invoke Tablespace alter script for DB2" + echo "================================================================================" + + LARGE_TBSP_SH_PATH="${DB2_INSTANCE_HOME_PATH}/Managed/Reg-Large_TBSP.sh" + + echo "" + echo "Executing ${LARGE_TBSP_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${LARGE_TBSP_SH_PATH} | tee /tmp/regLargeTbsp.log" db2inst1 || exit $? 
+ + fi + + PROPS_FILE_NAME=".PROPS" + echo "" + echo "Creating .PROPS file" + echo "--------------------------------------------------------------------------------" + + echo "PARM1=${DB2_BACKUP_BUCKET_ACCESS_KEY}" > /tmp/${PROPS_FILE_NAME} + echo "PARM2=${DB2_BACKUP_BUCKET_SECRET_KEY}" >> /tmp/${PROPS_FILE_NAME} + echo "CONTAINER=${BUCKET_NAME}" >> /tmp/${PROPS_FILE_NAME} + echo "SERVER=${COS_SERVER}" >> /tmp/${PROPS_FILE_NAME} + echo "SLACKURL=${SLACKURL}" >> /tmp/${PROPS_FILE_NAME} + echo "DAYOFFULL=Sat" >> /tmp/${PROPS_FILE_NAME} + echo "NUMOFBKUPTOKEEP=60" >> /tmp/${PROPS_FILE_NAME} + echo "ICD_AUTH_KEY=${ICD_AUTH_KEY}" >> /tmp/${PROPS_FILE_NAME} + + echo "" + echo "Copy ${PROPS_FILE_NAME} to ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc cp /tmp/${PROPS_FILE_NAME} ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0:/tmp/${PROPS_FILE_NAME} -c db2u || exit $? + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "sudo mv /tmp/${PROPS_FILE_NAME} /mnt/backup/bin/${PROPS_FILE_NAME}" db2inst1 || exit $? + + SET_COS_STORAGE_SH_PATH="${INSTHOME}/Managed/Set_DB_COS_Storage.sh" + + echo "" + echo "Executing ${SET_COS_STORAGE_SH_PATH} file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${SET_COS_STORAGE_SH_PATH} | tee /tmp/setcosstorage.log" db2inst1 || exit $? + + CHECK_COS_STORAGE_SH_PATH="CheckCOS.sh" + echo "" + echo "Executing ${INSTHOME}/bin/CheckCOS.sh file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${INSTHOME}/bin/CheckCOS.sh | tee /tmp/checkcos.log" db2inst1 || exit $? 
+ + echo "" + echo "Executing ${INSTHOME}/bin/RUN_OnDemandFULL_BKP.sh file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${INSTHOME}/bin/RUN_OnDemandFULL_BKP.sh | tee /tmp/runondemandfullbackup.log" db2inst1 || exit $? + + DROP_TEMPTS="/tmp/db2-scripts/dropTempts.sh" + echo "" + echo "Executing file ${DROP_TEMPTS} on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${DROP_TEMPTS} | tee /tmp/dropTempts.log" db2inst1 || exit $? + + echo "" + echo "Executing ${INSTHOME}/bin/RUN_OnDemandFULL_BKP.sh file on ${DB2_NAMESPACE}/c-${DB2_INSTANCE_NAME}-db2u-0" + echo "--------------------------------------------------------------------------------" + oc exec -n ${DB2_NAMESPACE} c-${DB2_INSTANCE_NAME}-db2u-0 -- su -lc "${INSTHOME}/bin/RUN_OnDemandFULL_BKP.sh | tee /tmp/runondemandfullbackup.log" db2inst1 || exit $? 
+ fi restartPolicy: Never @@ -552,4 +732,9 @@ spec: secretName: "db2u-certificate-{{ .Values.db2_instance_name }}" defaultMode: 420 optional: false +{{- if .Values.db2_backup_bucket_name }} + - name: db2-scripts + configMap: + name: db2u-scripts-{{ .Values.db2_instance_name }}-configmap +{{- end }} backoffLimit: 4 diff --git a/instance-applications/120-ibm-db2u-database/values.yaml b/instance-applications/120-ibm-db2u-database/values.yaml index 08894f174..9ac64f135 100644 --- a/instance-applications/120-ibm-db2u-database/values.yaml +++ b/instance-applications/120-ibm-db2u-database/values.yaml @@ -2,6 +2,7 @@ instance_id: xxx db2_instance_name: xxx db2_namespace: xxxx +db2_instance_home_path: /mnt/blumeta0/home/db2inst1 sm_aws_access_key_id: xxx sm_aws_secret_access_key: xxxx \ No newline at end of file diff --git a/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml b/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml index ad7910415..9873f026e 100644 --- a/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml +++ b/root-applications/ibm-mas-instance-root/templates/000-ibm-sync-resources.yaml @@ -84,7 +84,7 @@ spec: {{- end }} junitreporter: - reporter_name: "ibm-sync-resources" + reporter_name: "ibm-sync-resources-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml b/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml index a92dbef91..dbdc9fdcf 100644 --- a/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml +++ b/root-applications/ibm-mas-instance-root/templates/010-ibm-sync-jobs.yaml @@ -91,7 +91,7 @@ spec: {{- end }} junitreporter: - reporter_name: "ibm-sync-jobs" + reporter_name: "ibm-sync-jobs-{{ .Values.instance.id }}" cluster_id: "{{ 
.Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml b/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml index 8315095be..9f8d2489f 100644 --- a/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/100-ibm-sls-app.yaml @@ -59,7 +59,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-sls" + reporter_name: "ibm-sls-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-app.yaml b/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-app.yaml index 7547da6d1..cfa236287 100644 --- a/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-app.yaml @@ -97,7 +97,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-cp4d" + reporter_name: "ibm-cp4d-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-operator-app.yaml b/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-operator-app.yaml index 1ceeb665e..5960814c6 100644 --- a/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-operator-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/110-ibm-cp4d-operator-app.yaml @@ -89,7 +89,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} 
junitreporter: - reporter_name: "ibm-cp4d-operator" + reporter_name: "ibm-cp4d-operator-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/110-ibm-cs-control-app.yaml b/root-applications/ibm-mas-instance-root/templates/110-ibm-cs-control-app.yaml index 4f41eb311..b7f909cde 100644 --- a/root-applications/ibm-mas-instance-root/templates/110-ibm-cs-control-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/110-ibm-cs-control-app.yaml @@ -88,7 +88,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-cs-control" + reporter_name: "ibm-cs-control-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/110-ibm-db2u-app.yaml b/root-applications/ibm-mas-instance-root/templates/110-ibm-db2u-app.yaml index 537db9762..b277800b4 100644 --- a/root-applications/ibm-mas-instance-root/templates/110-ibm-db2u-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/110-ibm-db2u-app.yaml @@ -43,7 +43,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-db2u" + reporter_name: "ibm-db2u-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/120-ibm-aiopenscale-app.yaml b/root-applications/ibm-mas-instance-root/templates/120-ibm-aiopenscale-app.yaml index c1624a28a..7c2375b9c 100644 --- a/root-applications/ibm-mas-instance-root/templates/120-ibm-aiopenscale-app.yaml +++ 
b/root-applications/ibm-mas-instance-root/templates/120-ibm-aiopenscale-app.yaml @@ -47,7 +47,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-aiopenscale" + reporter_name: "ibm-aiopenscale-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/120-ibm-spark-app.yaml b/root-applications/ibm-mas-instance-root/templates/120-ibm-spark-app.yaml index 31548ed2f..b52121af2 100644 --- a/root-applications/ibm-mas-instance-root/templates/120-ibm-spark-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/120-ibm-spark-app.yaml @@ -51,7 +51,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-spark" + reporter_name: "ibm-spark-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/120-ibm-spss-app.yaml b/root-applications/ibm-mas-instance-root/templates/120-ibm-spss-app.yaml index e8fde2526..0b78eff81 100644 --- a/root-applications/ibm-mas-instance-root/templates/120-ibm-spss-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/120-ibm-spss-app.yaml @@ -55,7 +55,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-spss" + reporter_name: "ibm-spss-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/120-ibm-wml-app.yaml b/root-applications/ibm-mas-instance-root/templates/120-ibm-wml-app.yaml index 
2e02c69c0..3338e599d 100644 --- a/root-applications/ibm-mas-instance-root/templates/120-ibm-wml-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/120-ibm-wml-app.yaml @@ -52,7 +52,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-wml" + reporter_name: "ibm-wml-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/120-ibm-wsl-app.yaml b/root-applications/ibm-mas-instance-root/templates/120-ibm-wsl-app.yaml index 5987d8da3..c6634633a 100644 --- a/root-applications/ibm-mas-instance-root/templates/120-ibm-wsl-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/120-ibm-wsl-app.yaml @@ -55,7 +55,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-wsl" + reporter_name: "ibm-wsl-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml index 88433e948..b49b1720c 100644 --- a/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-app.yaml @@ -113,7 +113,7 @@ spec: {{- end }} junitreporter: - reporter_name: "ibm-mas-suite" + reporter_name: "ibm-mas-suite-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml 
b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml index 8ea080c1d..5b402e3ff 100644 --- a/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml +++ b/root-applications/ibm-mas-instance-root/templates/130-ibm-mas-suite-configs-app.yaml @@ -55,7 +55,7 @@ spec: {{- end }} {{ $value | toYaml | nindent 12 }} junitreporter: - reporter_name: "{{ $value.mas_config_chart }}" + reporter_name: "{{ $value.mas_config_chart }}-{{ $.Values.instance.id }}" cluster_id: "{{ $.Values.cluster.id }}" devops_mongo_uri: "{{ $.Values.devops.mongo_uri }}" devops_build_number: "{{ $.Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml b/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml index f5e5dffb7..830b8a8f0 100644 --- a/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml +++ b/root-applications/ibm-mas-instance-root/templates/200-ibm-mas-workspaces.yaml @@ -46,7 +46,7 @@ spec: custom_labels: {{ $.Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-workspace-{{ $value.mas_workspace_id }}" + reporter_name: "ibm-mas-ws-{{ $.Values.instance.id }}-{{ $value.mas_workspace_id }}" cluster_id: "{{ $.Values.cluster.id }}" devops_mongo_uri: "{{ $.Values.devops.mongo_uri }}" devops_build_number: "{{ $.Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml index 7cfc6ee63..323d550f4 100644 --- a/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/500-ibm-mas-masapp-manage-install.yaml @@ -61,7 +61,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - 
reporter_name: "ibm-mas-suite-app-install-manage" + reporter_name: "app-install-manage-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml b/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml index bc91caffb..7755b541a 100644 --- a/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml +++ b/root-applications/ibm-mas-instance-root/templates/510-550-ibm-mas-masapp-configs.yaml @@ -94,7 +94,7 @@ spec: {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-config-{{ $value.mas_app_id }}" + reporter_name: "app-config-{{ $value.mas_app_id }}-{{ $.Values.instance.id }}" cluster_id: "{{ $.Values.cluster.id }}" devops_mongo_uri: "{{ $.Values.devops.mongo_uri }}" devops_build_number: "{{ $.Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-assist-install.yaml b/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-assist-install.yaml index b84583936..ce1c6de12 100644 --- a/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-assist-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-assist-install.yaml @@ -60,7 +60,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-install-assist" + reporter_name: "app-install-assist-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-iot-install.yaml b/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-iot-install.yaml 
index 4c930ee78..550854973 100644 --- a/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-iot-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-iot-install.yaml @@ -61,7 +61,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-install-iot" + reporter_name: "app-install-iot-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-visualinspection-install.yaml b/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-visualinspection-install.yaml index 97c372579..828572b94 100644 --- a/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-visualinspection-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/510-ibm-mas-masapp-visualinspection-install.yaml @@ -65,7 +65,7 @@ spec: storage_class_definitions: {{ .Values.ibm_suite_app_visualinspection_install.storage_class_definitions | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-install-mvi" + reporter_name: "app-install-mvi-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml index 981d7226d..7adf44650 100644 --- a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-health-install.yaml @@ -61,7 +61,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} 
junitreporter: - reporter_name: "ibm-mas-suite-app-install-health" + reporter_name: "app-install-health-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml index e066b88b0..5c311fdb0 100644 --- a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-monitor-install.yaml @@ -61,7 +61,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-install-monitor" + reporter_name: "app-install-monitor-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml index c8b3f34fa..8b0202d1d 100644 --- a/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/520-ibm-mas-masapp-optimizer-install.yaml @@ -61,7 +61,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-install-optimizer" + reporter_name: "app-install-optimizer-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}" diff --git a/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml 
b/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml index 6ce9bd6de..ea26cd656 100644 --- a/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml +++ b/root-applications/ibm-mas-instance-root/templates/540-ibm-mas-masapp-predict-install.yaml @@ -61,7 +61,7 @@ spec: custom_labels: {{ .Values.custom_labels | toYaml | nindent 14 }} {{- end }} junitreporter: - reporter_name: "ibm-mas-suite-app-install-predict" + reporter_name: "app-install-predict-{{ .Values.instance.id }}" cluster_id: "{{ .Values.cluster.id }}" devops_mongo_uri: "{{ .Values.devops.mongo_uri }}" devops_build_number: "{{ .Values.devops.build_number }}"