diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml
index feabc473..212e6aca 100644
--- a/.github/workflows/build-test.yml
+++ b/.github/workflows/build-test.yml
@@ -1,167 +1,188 @@
-name: build
-
-# Controls when the action will run.
-on:
- # pull_request:
-
- push:
-
-jobs:
- deploy:
- name: K8s QuickStart CI test
- runs-on: ubuntu-latest
- timeout-minutes: 30
-
- steps:
- - name: Set env and tools
- run: |
- echo "TESTCLUSTERNAME=k8s-gha-test-$(date +%s)" >> $GITHUB_ENV
- echo "TESTRUNBRANCH=${GITHUB_REF##*/}" >> $GITHUB_ENV
- #
- sudo gem update --system 3.0.6
- sudo gem install yaml-lint
- sudo snap install kubectl --classic
- kubectl version --client
- curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash # setup Helm 3
-
- - name: Checkout
- uses: actions/checkout@v2
-
- - name: Set up Cloud SDK
- uses: google-github-actions/setup-gcloud@master
- with:
- project_id: ${{ secrets.GCP_PROJECT_ID }}
- service_account_key: ${{ secrets.GCP_SA_KEY }}
- export_default_credentials: true
-
- - name: Linting yaml files and chart
- run: |
- yaml-lint -n pubsubplus/*.yaml
- helm lint pubsubplus
-
- - name: Setup K8s env in GKE
- run: |
- mkdir gke_test; pushd gke_test
- wget https://raw.githubusercontent.com/SolaceProducts/solace-gke-quickstart/master/scripts/create_cluster.sh
- chmod +x create_cluster.sh
- ./create_cluster.sh -z us-east4-a,us-east4-b,us-east4-c -c $TESTCLUSTERNAME -m e2-standard-2
- gcloud container clusters get-credentials $TESTCLUSTERNAME --zone us-east4-a --project capable-stream-180018
- popd
- kubectl get statefulset,svc,pods,pvc,pv
-
- - name: Deploy HA broker and test
- run: |
- REPO=solace/solace-pubsub-standard
- TAG=latest
- openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=*"
- kubectl create secret tls test-tls --key="tls.key" --cert="tls.crt"
- helm install my-release pubsubplus --set solace.size=dev,solace.redundancy=true,tls.enabled=true,tls.serverCertificatesSecret=test-tls,solace.usernameAdminPassword=admin,image.repository=$REPO,image.tag=$TAG
- kubectl get statefulset,svc,pods,pvc,pv --show-labels
- echo "Waiting for broker to become active"
- sleep 40; kubectl describe nodes
- until kubectl get pods --show-labels | grep pubsubplus-0 | grep -m 1 -E '1/1'; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus-1 | grep -m 1 -E '1/1'; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus-2 | grep -m 1 -E '1/1'; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus- | grep -m 1 -E 'active=true'; do sleep 10; done
- helm test my-release | grep Phase | grep Succeeded
- kubectl get statefulset,svc,pods,pvc,pv --show-labels
- bash -c 'if [[ `kubectl get po --show-labels | grep -c "1/1"` -ne 3 ]]; then echo "Some pods are not ready!"; kubectl get po --show-labels; exit 1; fi'
- export url="$(kubectl get statefulset,svc,pods,pvc,pv --show-labels | grep LoadBalancer | awk '{print $4}')"; echo $url
- curl -O https://sftp.solace.com/download/SDKPERF_C_LINUX64
- tar -xvf SDKPERF_C_LINUX64
- pubSubTools/sdkperf_c -cip=tcp://$url:55555 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
- pubSubTools/sdkperf_c -cip=tcps://$url:55443 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
- sleep 30
- curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
- curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
- if [[ -z `curl -sS -u admin:admin http://$url:8080/SEMP -d "" | grep "Up"` ]] ; then echo "config-sync not up!"; exit 1; fi
- helm list
-
- - name: Upgrade HA broker and test
- run: |
- REPO=solace/solace-pubsub-standard
- UPGRADETAG=$(wget -q https://registry.hub.docker.com/v1/repositories/solace/solace-pubsub-standard/tags -O - | sed -e 's/[][]//g' -e 's/"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}' | sort -t "." -k1,1n -k2,2n -k3,3n | tail -1)
- helm upgrade my-release pubsubplus --set solace.size=dev,solace.redundancy=true,tls.enabled=true,tls.serverCertificatesSecret=test-tls,solace.usernameAdminPassword=admin,image.repository=$REPO,image.tag=$UPGRADETAG
- kubectl get statefulset,svc,pods,pvc,pv --show-labels
- echo "Waiting for broker to become active after upgrade"
- statefulset_name=$(kubectl get statefulset | grep pubsubplus | awk '{print $1}')
- until kubectl rollout status statefulset $statefulset_name -w | grep "rolling update complete"; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus-0 | grep -m 1 -E '1/1'; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus-1 | grep -m 1 -E '1/1'; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus-2 | grep -m 1 -E '1/1'; do sleep 10; done
- until kubectl get pods --show-labels | grep pubsubplus- | grep -m 1 -E 'active=true'; do sleep 10; done
- helm test my-release | grep Phase | grep Succeeded
- kubectl get statefulset,svc,pods,pvc,pv --show-labels
- bash -c 'if [[ `kubectl get po --show-labels | grep -c "1/1"` -ne 3 ]]; then echo "Some pods are not ready!"; kubectl get po --show-labels; exit 1; fi'
- export url="$(kubectl get statefulset,svc,pods,pvc,pv --show-labels | grep LoadBalancer | awk '{print $4}')"; echo $url
- pubSubTools/sdkperf_c -cip=tcp://$url:55555 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
- pubSubTools/sdkperf_c -cip=tcps://$url:55443 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
- sleep 10
- curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
- curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
- if [[ -z `curl -sS -u admin:admin http://$url:8080/SEMP -d "" | grep "Up"` ]] ; then echo "config-sync not up!"; exit 1; fi
- helm list
- helm delete $(helm list | grep deployed | awk '{print $1}')
- kubectl delete pvc --all
-
- - name: Create chart variants
- run: |
- bash docs/helm-charts/create-chart-variants.sh; # Create chart variants
- helm lint pubsubplus
- helm install --generate-name pubsubplus --dry-run
- helm lint pubsubplus-ha
- helm install --generate-name pubsubplus-ha --dry-run
- helm lint pubsubplus-dev
- helm install --generate-name pubsubplus-dev --dry-run
- helm lint pubsubplus-openshift
- helm install --generate-name pubsubplus-openshift --dry-run
- helm lint pubsubplus-openshift-ha
- helm install --generate-name pubsubplus-openshift-ha --dry-run
- helm lint pubsubplus-openshift-dev
- helm install --generate-name pubsubplus-openshift-dev --dry-run
-
- - name: Publish artifacts
- run: |
- # Two groups of Helm repos are created:
- # 1 - for general Helm charts that are hosted by Solace from gh-pages
- # 2 - for OpenShift variants that will be further submitted to OpenShift repo
- git config --global user.name "GitHub Actions Automation"
- git config --global user.email "<>"
- mkdir gh-pages; # Now update gh-pages
- if [ ${{ github.ref }} == 'refs/heads/master' ] && [ ${{ github.repository_owner }} == 'SolaceProducts' ] ; then
- echo "Using master on SolaceProducts"
- git clone --quiet --branch=gh-pages https://${{ secrets.GH_TOKEN }}@github.com/SolaceProducts/pubsubplus-kubernetes-quickstart gh-pages > /dev/null 2>&1
- rm -rf gh-pages/helm-charts-openshift; mkdir -p gh-pages/helm-charts-openshift
- mv pubsubplus-openshift-*.tgz gh-pages/helm-charts-openshift/
- helm repo index gh-pages/helm-charts-openshift/ --url https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts-openshift
- mv pubsubplus-*.tgz gh-pages/helm-charts/
- helm repo index gh-pages/helm-charts/ --url https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
- pushd gh-pages
- git add -f .
- git commit -m "Latest helm chart updates on successful gha-test build ${{ github.run_number }} auto-pushed to gh-pages"
- git remote add origin-pages https://${{ secrets.GH_TOKEN }}@github.com/SolaceProducts/pubsubplus-kubernetes-quickstart.git > /dev/null 2>&1
- git push --quiet --set-upstream origin-pages gh-pages
- popd
- echo "Updated and pushed GH pages!"
- elif [ ${{ github.ref }} != 'refs/heads/gh-pages' ] && [ ${{ github.repository_owner }} != 'SolaceProducts' ] ; then
- echo "Using $TESTRUNBRANCH on ${{ github.repository_owner }}"
- git clone --quiet --branch=gh-pages https://${{ secrets.GH_TOKEN }}@github.com/${{ github.repository }} gh-pages > /dev/null 2>&1
- rm -rf gh-pages/helm-charts-openshift; mkdir -p gh-pages/helm-charts-openshift
- mv pubsubplus-openshift-*.tgz gh-pages/helm-charts-openshift/
- helm repo index gh-pages/helm-charts-openshift/ --url https://solacedev.github.io/pubsubplus-kubernetes-quickstart/helm-charts-openshift
- mv pubsubplus-*.tgz gh-pages/helm-charts/
- helm repo index gh-pages/helm-charts/ --url https://solacedev.github.io/pubsubplus-kubernetes-quickstart/helm-charts
- pushd gh-pages
- git add -f .
- git commit -m "Latest helm chart updates on successful gha-test build ${{ github.run_number }} auto-pushed to gh-pages"
- git remote add origin-pages https://${{ secrets.GH_TOKEN }}@github.com/${{ github.repository }}.git > /dev/null 2>&1
- git push --quiet --set-upstream origin-pages gh-pages
- popd
- echo "Updated and pushed GH pages!"
- fi
-
- - name: Delete test resources (Cleanup)
- if: ${{ always() }}
- run: |
- gcloud container clusters delete $TESTCLUSTERNAME --quiet --zone us-east4-a
- gcloud compute disks list | grep gha-test | sed 1d $rpt | while read -r a b c; do gcloud compute disks delete $a --zone $b --quiet; done
+name: build
+
+# Controls when the action will run.
+on:
+ # pull_request:
+
+ push:
+
+jobs:
+ deploy:
+ name: K8s QuickStart CI test
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+
+ steps:
+ - name: Set env and tools
+ run: |
+ echo "TESTCLUSTERNAME=k8s-gha-test-$(date +%s)" >> $GITHUB_ENV
+ echo "TESTRUNBRANCH=${GITHUB_REF##*/}" >> $GITHUB_ENV
+ #
+ sudo gem update --system 3.0.6
+ sudo gem install yaml-lint
+ sudo snap install kubectl --classic
+ kubectl version --client
+ curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash # setup Helm 3
+ docker ps
+ go version
+
+ - name: Checkout
+ uses: actions/checkout@v2
+
+ - name: Set up Cloud SDK
+ uses: google-github-actions/setup-gcloud@v0
+ with:
+ project_id: ${{ secrets.GCP_PROJECT_ID }}
+ service_account_key: ${{ secrets.GCP_SA_KEY }}
+ export_default_credentials: true
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Linting yaml files and chart
+ run: |
+ yaml-lint -n pubsubplus/*.yaml
+ helm lint pubsubplus
+
+ - name: Setup K8s env in GKE
+ run: |
+ mkdir gke_test; pushd gke_test
+ wget https://raw.githubusercontent.com/SolaceProducts/solace-gke-quickstart/master/scripts/create_cluster.sh
+ chmod +x create_cluster.sh
+ ./create_cluster.sh -z us-east4-a,us-east4-b,us-east4-c -c $TESTCLUSTERNAME -m e2-standard-2
+ gcloud container clusters get-credentials $TESTCLUSTERNAME --zone us-east4-a --project capable-stream-180018
+ popd
+ kubectl get statefulset,svc,pods,pvc,pv
+
+ - name: Setup pod modifier
+ run: |
+ pushd solace-pod-modifier-admission-plugin
+ make image-build image-push IMAGE=${{ secrets.DOCKERHUB_TEST_IMAGE }}
+ make deploy IMAGE=${{ secrets.DOCKERHUB_TEST_IMAGE }}
+ sleep 2
+ timeout 20 bash -c 'while ! kubectl get pods -n solace-pod-modifier | grep Running ; do sleep 1; done'
+ timeout 20 bash -c 'while ! kubectl get MutatingWebhookConfiguration | grep pod-modifier.solace.com ; do sleep 1; done'
+ kubectl label namespace default pod-modifier.solace.com=enabled # prep namespace for use
+ popd
+
+ - name: Deploy HA broker and test
+ run: |
+ REPO=solace/solace-pubsub-standard
+ TAG=latest
+ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=*"
+ kubectl create secret tls test-tls --key="tls.key" --cert="tls.crt"
+ helm install my-release pubsubplus --set solace.size=dev,solace.redundancy=true,solace.podModifierEnabled=true,tls.enabled=true,tls.serverCertificatesSecret=test-tls,solace.usernameAdminPassword=admin,image.repository=$REPO,image.tag=$TAG
+ kubectl get statefulset,svc,pods,pvc,pv --show-labels
+ echo "Waiting for broker to become active"
+ sleep 40; kubectl describe nodes
+ until kubectl get pods --show-labels | grep pubsubplus-0 | grep -m 1 -E '1/1'; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus-1 | grep -m 1 -E '1/1'; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus-2 | grep -m 1 -E '1/1'; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus- | grep -m 1 -E 'active=true'; do sleep 10; done
+ kubectl get pods -o yaml | grep "memory: 1965Mi" # test small monitor memory
+ kubectl get pvc | grep 2Gi # test small monitor storage
+ helm test my-release | grep Phase | grep Succeeded
+ kubectl get statefulset,svc,pods,pvc,pv --show-labels
+ bash -c 'if [[ `kubectl get po --show-labels | grep -c "1/1"` -ne 3 ]]; then echo "Some pods are not ready!"; kubectl get po --show-labels; exit 1; fi'
+ export url="$(kubectl get statefulset,svc,pods,pvc,pv --show-labels | grep LoadBalancer | awk '{print $4}')"; echo $url
+ curl -O https://sftp.solace.com/download/SDKPERF_C_LINUX64
+ tar -xvf SDKPERF_C_LINUX64
+ pubSubTools/sdkperf_c -cip=tcp://$url:55555 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
+ pubSubTools/sdkperf_c -cip=tcps://$url:55443 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
+ sleep 30
+ curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
+ curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
+ if [[ -z `curl -sS -u admin:admin http://$url:8080/SEMP -d "" | grep "Up"` ]] ; then echo "config-sync not up!"; exit 1; fi
+ helm list
+
+ - name: Upgrade HA broker and test
+ run: |
+ REPO=solace/solace-pubsub-standard
+ UPGRADETAG=$(wget -q https://registry.hub.docker.com/v1/repositories/solace/solace-pubsub-standard/tags -O - | sed -e 's/[][]//g' -e 's/"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}' | sort -t "." -k1,1n -k2,2n -k3,3n | tail -1)
+ helm upgrade my-release pubsubplus --set solace.size=dev,solace.redundancy=true,solace.podModifierEnabled=true,tls.enabled=true,tls.serverCertificatesSecret=test-tls,solace.usernameAdminPassword=admin,image.repository=$REPO,image.tag=$UPGRADETAG,storage.useStorageGroup=true
+ kubectl get statefulset,svc,pods,pvc,pv --show-labels
+ echo "Waiting for broker to become active after upgrade"
+ statefulset_name=$(kubectl get statefulset | grep pubsubplus | awk '{print $1}')
+ until kubectl rollout status statefulset $statefulset_name -w | grep "rolling update complete"; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus-0 | grep -m 1 -E '1/1'; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus-1 | grep -m 1 -E '1/1'; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus-2 | grep -m 1 -E '1/1'; do sleep 10; done
+ until kubectl get pods --show-labels | grep pubsubplus- | grep -m 1 -E 'active=true'; do sleep 10; done
+ helm test my-release | grep Phase | grep Succeeded
+ kubectl get statefulset,svc,pods,pvc,pv --show-labels
+ bash -c 'if [[ `kubectl get po --show-labels | grep -c "1/1"` -ne 3 ]]; then echo "Some pods are not ready!"; kubectl get po --show-labels; exit 1; fi'
+ export url="$(kubectl get statefulset,svc,pods,pvc,pv --show-labels | grep LoadBalancer | awk '{print $4}')"; echo $url
+ pubSubTools/sdkperf_c -cip=tcp://$url:55555 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
+ pubSubTools/sdkperf_c -cip=tcps://$url:55443 -mn=10000 -mr=0 -ptl=t1 -stl=t1 | grep "Total Messages"
+ sleep 10
+ curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
+ curl -k -sS -u admin:admin https://$url:1943/SEMP -d ""
+ if [[ -z `curl -sS -u admin:admin http://$url:8080/SEMP -d "" | grep "Up"` ]] ; then echo "config-sync not up!"; exit 1; fi
+ helm list
+ helm delete $(helm list | grep deployed | awk '{print $1}')
+ kubectl delete pvc --all
+
+ - name: Create chart variants
+ run: |
+ bash docs/helm-charts/create-chart-variants.sh; # Create chart variants
+ helm lint pubsubplus
+ helm install --generate-name pubsubplus --dry-run
+ helm lint pubsubplus-ha
+ helm install --generate-name pubsubplus-ha --dry-run
+ helm lint pubsubplus-dev
+ helm install --generate-name pubsubplus-dev --dry-run
+ helm lint pubsubplus-openshift
+ helm install --generate-name pubsubplus-openshift --dry-run
+ helm lint pubsubplus-openshift-ha
+ helm install --generate-name pubsubplus-openshift-ha --dry-run
+ helm lint pubsubplus-openshift-dev
+ helm install --generate-name pubsubplus-openshift-dev --dry-run
+
+ - name: Publish artifacts
+ run: |
+ # Two groups of Helm repos are created:
+ # 1 - for general Helm charts that are hosted by Solace from gh-pages
+ # 2 - for OpenShift variants that will be further submitted to OpenShift repo
+ git config --global user.name "GitHub Actions Automation"
+ git config --global user.email "<>"
+ mkdir gh-pages; # Now update gh-pages
+ if [ ${{ github.ref }} == 'refs/heads/master' ] && [ ${{ github.repository_owner }} == 'SolaceProducts' ] ; then
+ echo "Using master on SolaceProducts"
+ git clone --quiet --branch=gh-pages https://${{ secrets.GH_TOKEN }}@github.com/SolaceProducts/pubsubplus-kubernetes-quickstart gh-pages > /dev/null 2>&1
+ rm -rf gh-pages/helm-charts-openshift; mkdir -p gh-pages/helm-charts-openshift
+ mv pubsubplus-openshift-*.tgz gh-pages/helm-charts-openshift/
+ helm repo index gh-pages/helm-charts-openshift/ --url https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts-openshift
+ mv pubsubplus-*.tgz gh-pages/helm-charts/
+ helm repo index gh-pages/helm-charts/ --url https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
+ pushd gh-pages
+ git add -f .
+ git commit -m "Latest helm chart updates on successful gha-test build ${{ github.run_number }} auto-pushed to gh-pages"
+ git remote add origin-pages https://${{ secrets.GH_TOKEN }}@github.com/SolaceProducts/pubsubplus-kubernetes-quickstart.git > /dev/null 2>&1
+ git push --quiet --set-upstream origin-pages gh-pages
+ popd
+ echo "Updated and pushed GH pages!"
+ elif [ ${{ github.ref }} != 'refs/heads/gh-pages' ] && [ ${{ github.repository_owner }} != 'SolaceProducts' ] ; then
+ echo "Using $TESTRUNBRANCH on ${{ github.repository_owner }}"
+ git clone --quiet --branch=gh-pages https://${{ secrets.GH_TOKEN }}@github.com/${{ github.repository }} gh-pages > /dev/null 2>&1
+ rm -rf gh-pages/helm-charts-openshift; mkdir -p gh-pages/helm-charts-openshift
+ mv pubsubplus-openshift-*.tgz gh-pages/helm-charts-openshift/
+ helm repo index gh-pages/helm-charts-openshift/ --url https://solacedev.github.io/pubsubplus-kubernetes-quickstart/helm-charts-openshift
+ mv pubsubplus-*.tgz gh-pages/helm-charts/
+ helm repo index gh-pages/helm-charts/ --url https://solacedev.github.io/pubsubplus-kubernetes-quickstart/helm-charts
+ pushd gh-pages
+ git add -f .
+ git commit -m "Latest helm chart updates on successful gha-test build ${{ github.run_number }} auto-pushed to gh-pages"
+ git remote add origin-pages https://${{ secrets.GH_TOKEN }}@github.com/${{ github.repository }}.git > /dev/null 2>&1
+ git push --quiet --set-upstream origin-pages gh-pages
+ popd
+ echo "Updated and pushed GH pages!"
+ fi
+
+ - name: Delete test resources (Cleanup)
+ if: ${{ always() }}
+ run: |
+ gcloud container clusters delete $TESTCLUSTERNAME --quiet --zone us-east4-a
+        gcloud compute disks list | grep gha-test | sed 1d | while read -r a b c; do gcloud compute disks delete $a --zone $b --quiet; done
diff --git a/LICENSE b/LICENSE
index 8dada3ed..c0ee8129 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,201 +1,201 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index 0dd5cc75..c18115d0 100644
--- a/README.md
+++ b/README.md
@@ -1,123 +1,123 @@
-[![Actions Status](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/workflows/build/badge.svg?branch=master)](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/actions?query=workflow%3Abuild+branch%3Amaster)
-
-# Install a Solace PubSub+ Software Event Broker onto a Kubernetes cluster
-
-The [Solace PubSub+ Platform](https://solace.com/products/platform/)'s [software event broker](https://solace.com/products/event-broker/software/) efficiently streams event-driven information between applications, IoT devices and user interfaces running in the cloud, on-premises, and hybrid environments using open APIs and protocols like AMQP, JMS, MQTT, REST and WebSocket. It can be installed into a variety of public and private clouds, PaaS, and on-premises environments, and brokers in multiple locations can be linked together in an [event mesh](https://solace.com/what-is-an-event-mesh/) to dynamically share events across the distributed enterprise.
-
-## Overview
-
-This project is a best practice template intended for development and demo purposes. The tested and recommended Solace PubSub+ Software Event Broker version is 9.10.
-
-This document provides a quick getting started guide to install a software event broker in various configurations onto a [Kubernetes](https://kubernetes.io/docs/home/) cluster.
-
-Detailed documentation is provided in the [Solace PubSub+ Software Event Broker on Kubernetes Documentation](docs/PubSubPlusK8SDeployment.md). Consult the [Deployment Coonsiderations](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#pubsub-event-broker-deployment-considerations) section of the Documentation when planning your deployment.
-
-This document is applicable to any platform supporting Kubernetes, with specific hints on how to set up a simple MiniKube deployment on a Linux-based machine. To view examples of other Kubernetes platforms see:
-
-- [Deploying a Solace PubSub+ Software Event Broker HA group onto a Google Kubernetes Engine](//github.com/SolaceProducts/solace-gke-quickstart )
-- [Deploying a Solace PubSub+ Software Event Broker HA Group onto an OpenShift 4 platform](//github.com/SolaceProducts/solace-openshift-quickstart )
-- Deploying a Solace PubSub+ Software Event Broker HA Group onto Amazon EKS (Amazon Elastic Container Service for Kubernetes): follow the [AWS documentation](//docs.aws.amazon.com/eks/latest/userguide/getting-started.html ) to set up EKS then this guide to deploy.
-- [Install a Solace PubSub+ Software Event Broker onto a Pivotal Container Service (PKS) cluster](//github.com/SolaceProducts/solace-pks )
-- Deploying a Solace PubSub+ Software Event Broker HA Group onto Azure Kubernetes Service (AKS): follow the [Azure documentation](//docs.microsoft.com/en-us/azure/aks/ ) to deploy an AKS cluster then this guide to deploy.
-
-## How to deploy the Solace PubSub+ Software Event Broker onto Kubernetes
-
-Solace PubSub+ Software Event Broker can be deployed in either a three-node High-Availability (HA) group or as a single-node standalone deployment. For simple test environments that need only to validate application functionality, a single instance will suffice. Note that in production, or any environment where message loss cannot be tolerated, an HA deployment is required.
-
-We recommend using the Helm tool for convenience. An [alternative method](/docs/PubSubPlusK8SDeployment.md#alternative-deployment-with-generating-templates-for-the-kubernetes-kubectl-tool) using generated templates is also provided.
-
-In this quick start we go through the steps to set up a PubSub+ Software Event Broker using [Solace PubSub+ Helm charts](//artifacthub.io/packages/search?ts_query_web=solace).
-
-There are three Helm chart variants available with default small-size configurations:
-1. `pubsubplus-dev` - recommended PubSub+ Software Event Broker for Developers (standalone) - no guaranteed performance
-2. `pubsubplus` - PubSub+ Software Event Broker standalone, supporting 100 connections
-3. `pubsubplus-ha` - PubSub+ Software Event Broker HA, supporting 100 connections
-
-For other PubSub+ Software Event Broker configurations or sizes, refer to the [PubSub+ Software Event Broker Helm Chart Reference](/pubsubplus/README.md).
-
-### 1. Get a Kubernetes environment
-
-Follow your Kubernetes provider's instructions ([other options available here](https://kubernetes.io/docs/setup/)). Ensure you meet [minimum CPU, Memory and Storage requirements](docs/PubSubPlusK8SDeployment.md#cpu-and-memory-requirements) for the targeted PubSub+ Software Event Broker configuration size. Important: the broker resource requirements refer to available resources on a [Kubernetes node](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler).
-> Note: If using [MiniKube](https://kubernetes.io/docs/setup/learning-environment/minikube/), use `minikube start` with specifying the options `--memory` and `--cpu` to assign adequate resources to the MiniKube VM. The recommended memory is 1GB plus the minimum requirements of your event broker.
-
-Also have the `kubectl` tool [installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/) locally.
-
-Check to ensure your Kubernetes environment is ready:
-```bash
-# This shall return worker nodes listed and ready
-kubectl get nodes
-```
-
-### 2. Install and configure Helm
-
-Follow the [Helm Installation notes of your target release](https://github.com/helm/helm/releases) for your platform.
-Note: Helm v2 is no longer supported. For Helm v2 support refer to [earlier versions of the chart](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/releases).
-
-On Linux a simple option to set up the latest stable release is to run:
-
-```bash
-curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
-```
-
-Helm is configured properly if the command `helm version` returns no error.
-
-
-### 3. Install the Solace PubSub+ Software Event Broker with default configuration
-
-- Add the Solace Helm charts to your local Helm repo:
-```bash
- helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
-```
-- By default the publicly available [latest Docker image of PubSub+ Software Event Broker Standard Edition](https://hub.Docker.com/r/solace/solace-pubsub-standard/tags/) will be used. Specify a different image or [use a Docker image from a private registry](/docs/PubSubPlusK8SDeployment.md#using-private-registries) if required. If using a non-default image, add the `--set image.repository=,image.tag=` values to the commands below.
-- Generally, for configuration options and ways to override default configuration values (using `--set` is one the options), consult the [PubSub+ Software Event Broker Helm Chart Reference](/pubsubplus/README.md#configuration).
-- Use one of the following chart variants to create a deployment:
-
-a) Create a Solace PubSub+ Software Event Broker deployment for development purposes using `pubsubplus-dev`. It requires a minimum of 1 CPU and 2 GB of memory available to the event broker pod.
-```bash
-# Deploy PubSub+ Software Event Broker Standard edition for developers
-helm install my-release solacecharts/pubsubplus-dev
-```
-
-b) Create a Solace PubSub+ standalone deployment, supporting 100 connections scaling using `pubsubplus`. A minimum of 2 CPUs and 4 GB of memory must be available to the event broker pod.
-```bash
-# Deploy PubSub+ Software Event Broker Standard edition, standalone
-helm install my-release solacecharts/pubsubplus
-```
-
-c) Create a Solace PubSub+ HA deployment, supporting 100 connections scaling using `pubsubplus-ha`. The minimum resource requirements are 2 CPU and 4 GB of memory available to each of the three event broker pods.
-```bash
-# Deploy PubSub+ Software Event Broker Standard edition, HA
-helm install my-release solacecharts/pubsubplus-ha
-```
-
-The above options will start the deployment and write related information and notes to the screen.
-
-> Note: When using MiniKube, there is no integrated Load Balancer, which is the default service type. For a workaround, execute `minikube service my-release-pubsubplus-dev` to expose the services. Services will be accessible directly using the NodePort instead of direct Port access, for which the mapping can be obtained from `kubectl describe service my-release-pubsubplus-dev`.
-
-Wait for the deployment to complete following the information printed on the console.
-
-Refer to the detailed PubSub+ Kubernetes documentation for:
-* [Validating the deployment](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#validating-the-deployment); or
-* [Troubleshooting](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#troubleshooting)
-* [Modifying or Upgrading](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#modifying-or-upgrading-a-deployment)
-* [Deleting the deployment](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#deleting-a-deployment)
-
-## Contributing
-
-Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests to us.
-
-## Authors
-
-See the list of [contributors](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/graphs/contributors) who participated in this project.
-
-## License
-
-This project is licensed under the Apache License, Version 2.0. - See the [LICENSE](LICENSE) file for details.
-
-## Resources
-
-For more information about Solace technology in general please visit these resources:
-
-- The Solace Developer Portal website at: [solace.dev](//solace.dev/)
-- Understanding [Solace technology](//solace.com/products/platform/)
-- Ask the [Solace community](//dev.solace.com/community/).
+[![Actions Status](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/workflows/build/badge.svg?branch=master)](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/actions?query=workflow%3Abuild+branch%3Amaster)
+
+# Install a Solace PubSub+ Software Event Broker onto a Kubernetes cluster
+
+The [Solace PubSub+ Platform](https://solace.com/products/platform/)'s [software event broker](https://solace.com/products/event-broker/software/) efficiently streams event-driven information between applications, IoT devices and user interfaces running in the cloud, on-premises, and hybrid environments using open APIs and protocols like AMQP, JMS, MQTT, REST and WebSocket. It can be installed into a variety of public and private clouds, PaaS, and on-premises environments, and brokers in multiple locations can be linked together in an [event mesh](https://solace.com/what-is-an-event-mesh/) to dynamically share events across the distributed enterprise.
+
+## Overview
+
+This project is a best practice template intended for development and demo purposes. The tested and recommended Solace PubSub+ Software Event Broker version is 9.10.
+
+This document provides a quick getting started guide to install a software event broker in various configurations onto a [Kubernetes](https://kubernetes.io/docs/home/) cluster.
+
+Detailed documentation is provided in the [Solace PubSub+ Software Event Broker on Kubernetes Documentation](docs/PubSubPlusK8SDeployment.md). Consult the [Deployment Considerations](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#pubsub-event-broker-deployment-considerations) section of the Documentation when planning your deployment.
+
+This document is applicable to any platform supporting Kubernetes, with specific hints on how to set up a simple MiniKube deployment on a Linux-based machine. To view examples of other Kubernetes platforms see:
+
+- [Deploying a Solace PubSub+ Software Event Broker HA group onto a Google Kubernetes Engine](//github.com/SolaceProducts/solace-gke-quickstart )
+- [Deploying a Solace PubSub+ Software Event Broker HA Group onto an OpenShift 4 platform](//github.com/SolaceProducts/solace-openshift-quickstart )
+- Deploying a Solace PubSub+ Software Event Broker HA Group onto Amazon EKS (Amazon Elastic Container Service for Kubernetes): follow the [AWS documentation](//docs.aws.amazon.com/eks/latest/userguide/getting-started.html ) to set up EKS then this guide to deploy.
+- [Install a Solace PubSub+ Software Event Broker onto a Pivotal Container Service (PKS) cluster](//github.com/SolaceProducts/solace-pks )
+- Deploying a Solace PubSub+ Software Event Broker HA Group onto Azure Kubernetes Service (AKS): follow the [Azure documentation](//docs.microsoft.com/en-us/azure/aks/ ) to deploy an AKS cluster then this guide to deploy.
+
+## How to deploy the Solace PubSub+ Software Event Broker onto Kubernetes
+
+Solace PubSub+ Software Event Broker can be deployed in either a three-node High-Availability (HA) group or as a single-node standalone deployment. For simple test environments that need only to validate application functionality, a single instance will suffice. Note that in production, or any environment where message loss cannot be tolerated, an HA deployment is required.
+
+We recommend using the Helm tool for convenience. An [alternative method](/docs/PubSubPlusK8SDeployment.md#alternative-deployment-with-generating-templates-for-the-kubernetes-kubectl-tool) using generated templates is also provided.
+
+In this quick start we go through the steps to set up a PubSub+ Software Event Broker using [Solace PubSub+ Helm charts](//artifacthub.io/packages/search?ts_query_web=solace).
+
+There are three Helm chart variants available with default small-size configurations:
+1. `pubsubplus-dev` - recommended PubSub+ Software Event Broker for Developers (standalone) - no guaranteed performance
+2. `pubsubplus` - PubSub+ Software Event Broker standalone, supporting 100 connections
+3. `pubsubplus-ha` - PubSub+ Software Event Broker HA, supporting 100 connections
+
+For other PubSub+ Software Event Broker configurations or sizes, refer to the [PubSub+ Software Event Broker Helm Chart Reference](/pubsubplus/README.md).
+
+### 1. Get a Kubernetes environment
+
+Follow your Kubernetes provider's instructions ([other options available here](https://kubernetes.io/docs/setup/)). Ensure you meet [minimum CPU, Memory and Storage requirements](docs/PubSubPlusK8SDeployment.md#cpu-and-memory-requirements) for the targeted PubSub+ Software Event Broker configuration size. Important: the broker resource requirements refer to available resources on a [Kubernetes node](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler).
+> Note: If using [MiniKube](https://kubernetes.io/docs/setup/learning-environment/minikube/), use `minikube start` with specifying the options `--memory` and `--cpus` to assign adequate resources to the MiniKube VM. The recommended memory is 1GB plus the minimum requirements of your event broker.
+
+Also have the `kubectl` tool [installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/) locally.
+
+Check to ensure your Kubernetes environment is ready:
+```bash
+# This shall return worker nodes listed and ready
+kubectl get nodes
+```
+
+### 2. Install and configure Helm
+
+Follow the [Helm Installation notes of your target release](https://github.com/helm/helm/releases) for your platform.
+Note: Helm v2 is no longer supported. For Helm v2 support refer to [earlier versions of the chart](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/releases).
+
+On Linux a simple option to set up the latest stable release is to run:
+
+```bash
+curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+```
+
+Helm is configured properly if the command `helm version` returns no error.
+
+
+### 3. Install the Solace PubSub+ Software Event Broker with default configuration
+
+- Add the Solace Helm charts to your local Helm repo:
+```bash
+ helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
+```
+- By default the publicly available [latest Docker image of PubSub+ Software Event Broker Standard Edition](https://hub.docker.com/r/solace/solace-pubsub-standard/tags/) will be used. Specify a different image or [use a Docker image from a private registry](/docs/PubSubPlusK8SDeployment.md#using-private-registries) if required. If using a non-default image, add the `--set image.repository=,image.tag=` values to the commands below.
+- Generally, for configuration options and ways to override default configuration values (using `--set` is one of the options), consult the [PubSub+ Software Event Broker Helm Chart Reference](/pubsubplus/README.md#configuration).
+- Use one of the following chart variants to create a deployment:
+
+a) Create a Solace PubSub+ Software Event Broker deployment for development purposes using `pubsubplus-dev`. It requires a minimum of 1 CPU and 2 GB of memory available to the event broker pod.
+```bash
+# Deploy PubSub+ Software Event Broker Standard edition for developers
+helm install my-release solacecharts/pubsubplus-dev
+```
+
+b) Create a Solace PubSub+ standalone deployment, supporting 100 connections scaling using `pubsubplus`. A minimum of 2 CPUs and 4 GB of memory must be available to the event broker pod.
+```bash
+# Deploy PubSub+ Software Event Broker Standard edition, standalone
+helm install my-release solacecharts/pubsubplus
+```
+
+c) Create a Solace PubSub+ HA deployment, supporting 100 connections scaling using `pubsubplus-ha`. The minimum resource requirements are 2 CPU and 4 GB of memory available to each of the three event broker pods.
+```bash
+# Deploy PubSub+ Software Event Broker Standard edition, HA
+helm install my-release solacecharts/pubsubplus-ha
+```
+
+The above options will start the deployment and write related information and notes to the screen.
+
+> Note: When using MiniKube, there is no integrated Load Balancer, which is the default service type. For a workaround, execute `minikube service my-release-pubsubplus-ha` to expose the services. Services will be accessible directly using the NodePort instead of direct Port access, for which the mapping can be obtained from `kubectl describe service my-release-pubsubplus-ha`.
+
+Wait for the deployment to complete following the information printed on the console.
+
+Refer to the detailed PubSub+ Kubernetes documentation for:
+* [Validating the deployment](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#validating-the-deployment); or
+* [Troubleshooting](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#troubleshooting)
+* [Modifying or Upgrading](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#modifying-or-upgrading-a-deployment)
+* [Deleting the deployment](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#deleting-a-deployment)
+
+## Contributing
+
+Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests to us.
+
+## Authors
+
+See the list of [contributors](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/graphs/contributors) who participated in this project.
+
+## License
+
+This project is licensed under the Apache License, Version 2.0 - See the [LICENSE](LICENSE) file for details.
+
+## Resources
+
+For more information about Solace technology in general please visit these resources:
+
+- The Solace Developer Portal website at: [solace.dev](//solace.dev/)
+- Understanding [Solace technology](//solace.com/products/platform/)
+- Ask the [Solace community](//dev.solace.com/community/).
diff --git a/docs/PubSubPlusK8SDeployment.md b/docs/PubSubPlusK8SDeployment.md
index 8ea19514..880bff26 100644
--- a/docs/PubSubPlusK8SDeployment.md
+++ b/docs/PubSubPlusK8SDeployment.md
@@ -1,833 +1,1037 @@
-# Solace PubSub+ Software Event Broker on Kubernetes Deployment Documentation
-
-This document provide detailed information for deploying Solace PubSub+ Software Event Broker on Kubernetes.
-
-* For a hands-on quick start, refer to the [Quick Start guide](/README.md).
-* For the `pubsubplus` Helm chart configuration options, refer to the [PubSub+ Software Event Broker Helm Chart Reference](/pubsubplus/README.md).
-
-This document is applicable to any platform supporting Kubernetes.
-
-Contents:
- * [**The Solace PubSub+ Software Event Broker**](#the-solace-pubsub-software-event-broker)
- * [**Overview**](#overview)
- * [**PubSub+ Event Broker Deployment Considerations**](#pubsub-software-event-broker-deployment-considerations)
- + [Deployment scaling](#deployment-scaling)
- + [CPU and Memory Requirements](#cpu-and-memory-requirements)
- + [Disk Storage](#disk-storage)
- - [Using the default or an existing storage class](#using-the-default-or-an-existing-storage-class)
- - [Creating a new storage class](#creating-a-new-storage-class)
- - [Using an existing PVC (Persistent Volume Claim)](#using-an-existing-pvc-persistent-volume-claim-)
- - [Using a pre-created provider-specific volume](#using-a-pre-created-provider-specific-volume)
- + [Exposing the PubSub+ Event Broker Services](#exposing-the-pubsub-software-event-broker-services)
- - [Using pod label "active" to identify the active event broker node](#using-pod-label-active-to-identify-the-active-event-broker-node)
- + [Enabling use of TLS to access broker services](#enabling-use-of-tls-to-access-broker-services)
- - [Setting up TLS](#setting-up-tls)
- - [Rotating the server key](#rotating-the-server-key)
- + [The PubSub+ Docker image](#the-pubsub-software-event-broker-docker-image)
- - [Using a public registry](#using-a-public-registry)
- - [Using private registries](#using-private-registries)
- - [Using ImagePullSecrets for signed images](#using-imagepullsecrets-for-signed-images)
- + [Security considerations](#security-considerations)
- - [Using Security Context](#using-security-context)
- - [Enabling pod label "active" in a tight security environment](#enabling-pod-label-active-in-a-tight-security-environment)
- * [**Deployment Prerequisites**](#deployment-prerequisites)
- + [Platform and tools setup](#platform-and-tools-setup)
- - [Install the `kubectl` command-line tool](#install-the-kubectl-command-line-tool)
- - [Perform any necessary Kubernetes platform-specific setup](#perform-any-necessary-kubernetes-platform-specific-setup)
- - [Install and setup the Helm package manager](#install-and-setup-the-helm-package-manager)
- * [**Deployment steps**](#deployment-steps)
- + [Deployment steps using Helm](#deployment-steps-using-helm)
- + [Alternative Deployment with generating templates for the Kubernetes `kubectl` tool](#alternative-deployment-with-generating-templates-for-the-kubernetes-kubectl-tool)
- * [**Validating the Deployment**](#validating-the-deployment)
- + [Gaining admin access to the event broker](#gaining-admin-access-to-the-event-broker)
- - [Admin Password](#admin-password)
- - [WebUI, SolAdmin and SEMP access](#webui-soladmin-and-semp-access)
- - [Solace CLI access](#solace-cli-access)
- - [SSH access to individual event brokers](#ssh-access-to-individual-event-brokers)
- + [Testing data access to the event broker](#testing-data-access-to-the-event-broker)
- * [**Troubleshooting**](#troubleshooting)
- + [Viewing logs](#viewing-logs)
- + [Viewing events](#viewing-events)
- + [PubSub+ Software Event Broker troubleshooting](#pubsub-software-event-broker-troubleshooting)
- - [General Kubernetes troubleshooting hints](#general-kubernetes-troubleshooting-hints)
- - [Pods stuck in not enough resources](#pods-stuck-in-not-enough-resources)
- - [Pods stuck in no storage](#pods-stuck-in-no-storage)
- - [Pods stuck in CrashLoopBackoff, Failed or Not Ready](#pods-stuck-in-crashloopbackoff-failed-or-not-ready)
- - [No Pods listed](#no-pods-listed)
- - [Security constraints](#security-constraints)
- * [**Modifying or upgrading a Deployment**](#modifying-or-upgrading-a-deployment)
- - [Upgrade example](#upgrade-example)
- - [Modification example](#modification-example)
- * [**Re-installing a Deployment**](#re-installing-a-deployment)
- * [**Deleting a Deployment**](#deleting-a-deployment)
-
-
-
-
-## The Solace PubSub+ Software Event Broker
-
-The [PubSub+ Software Event Broker](https://solace.com/products/event-broker/) of the [Solace PubSub+ Platform](https://solace.com/products/platform/) efficiently streams event-driven information between applications, IoT devices and user interfaces running in the cloud, on-premises, and hybrid environments using open APIs and protocols like AMQP, JMS, MQTT, REST and WebSocket. It can be installed into a variety of public and private clouds, PaaS, and on-premises environments, and brokers in multiple locations can be linked together in an [event mesh](https://solace.com/what-is-an-event-mesh/) to dynamically share events across the distributed enterprise.
-
-## Overview
-
-This document assumes a basic understanding of [Kubernetes concepts](https://kubernetes.io/docs/concepts/).
-
-For an example deployment diagram, check out the [PubSub+ Event Broker on Google Kubernetes Engine (GKE) quickstart](https://github.com/SolaceProducts/pubsubplus-gke-quickstart#how-to-deploy-solace-pubsub-software-event-broker-onto-gke).
-
-Multiple YAML templates define the PubSub+ Kubernetes deployment with several parameters as deployment options. The templates are packaged as the `pubsubplus` [Helm chart](//helm.sh/docs/topics/charts/) to enable easy customization by only specifying the non-default parameter values, without the need to edit the template files.
-
-There are two deployment options described in this document:
-* The recommended option is to use the [Kubernetes Helm tool](https://github.com/helm/helm/blob/master/README.md), which can also manage your deployment's lifecycle, including upgrade and delete.
-* Another option is to generate a set of templates with customized values from the PubSub+ Helm chart and then use the Kubernetes native `kubectl` tool to deploy. The deployment will use the authorizations of the requesting user. However, in this case, Helm will not be able to manage your Kubernetes rollouts lifecycle.
-
-The next sections will provide details on the PubSub+ Helm chart, dependencies and customization options, followed by [deployment prerequisites](#deployment-prerequisites) and the actual [deployment steps](#deployment-steps).
-
-## PubSub+ Software Event Broker Deployment Considerations
-
-The following diagram illustrates the template organization used for the PubSub+ Deployment chart. Note that the minimum is shown in this diagram to give you some background regarding the relationships and major functions.
-![alt text](/docs/images/template_relationship.png "`pubsubplus` chart template relationship")
-
-The StatefulSet template controls the pods of a PubSub+ Software Event Broker deployment. It also mounts the scripts from the ConfigMap and the files from the Secrets and maps the event broker data directories to a storage volume through a StorageClass, if configured. The Service template provides the event broker services at defined ports. The Service-Discovery template is only used internally, so pods in a PubSub+ event broker redundancy group can communicate with each other in an HA setting.
-
-All the `pubsubplus` chart parameters are documented in the [PubSub+ Software Event Broker Helm Chart](/pubsubplus/README.md#configuration) reference.
-
-### Deployment scaling
-
-Solace PubSub+ Software Event Broker event broker can be vertically scaled by specifying the [number of concurrent client connections](//docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/System-Scaling-Parameters.htm#max-client-connections), controlled by the `solace.size` chart parameter.
-
-Depending on the `solace.redundancy` parameter, one event router pod is deployed in a single-node standalone deployment or three pods if deploying a [High-Availability (HA) group](//docs.solace.com/Overviews/SW-Broker-Redundancy-and-Fault-Tolerance.htm).
-
-Horizontal scaling is possible through [connecting multiple deployments](//docs.solace.com/Overviews/DMR-Overview.htm).
-
-### CPU and Memory Requirements
-
-The following CPU and memory requirements (for each pod) are summarized here from the [Solace documentation](//docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/System-Resource-Requirements.htm#res-req-container) for the possible `pubsubplus` chart `solace.size` parameter values:
-* `dev`: no guaranteed performance, minimum requirements: 1 CPU, 3.4 GiB memory
-* `prod100`: up to 100 connections, minimum requirements: 2 CPU, 3.4 GiB memory
-* `prod1k`: up to 1,000 connections, minimum requirements: 2 CPU, 6.4 GiB memory
-* `prod10k`: up to 10,000 connections, minimum requirements: 4 CPU, 12.2 GiB memory
-* `prod100k`: up to 100,000 connections, minimum requirements: 8 CPU, 30.3 GiB memory
-* `prod200k`: up to 200,000 connections, minimum requirements: 12 CPU, 51.4 GiB memory
-
-### Disk Storage
-
-The [PubSub+ deployment uses disk storage](//docs.solace.com/Configuring-and-Managing/Configuring-Storage.htm#Storage-) for logging, configuration, guaranteed messaging and other purposes, allocated from Kubernetes volumes.
-
-Storage size (`storage.size` parameter) requirements for the scaling tiers:
-* `dev`: no guaranteed performance: 5GB
-* `prod100`: up to 100 connections, 7GB
-* `prod1k`: up to 1,000 connections, 14GB
-* `prod10k`: up to 10,000 connections, 18GB
-* `prod100k`: up to 100,000 connections, 30GB
-* `prod200k`: up to 200,000 connections, 34GB
-
-Using a persistent storage is recommended, otherwise if pod-local storage is used data will be lost with the loss of a pod. The `storage.persistent` parameter is set to `true` by default.
-
-The `pubsubplus` chart supports allocation of new storage volumes or mounting volumes with existing data. To avoid data corruption ensure to allocate clean new volumes for new deployments.
-
-The recommended default allocation is to use Kubernetes [Storage Classes](//kubernetes.io/docs/concepts/storage/storage-classes/) utilizing [Dynamic Volume Provisioning](//kubernetes.io/docs/concepts/storage/dynamic-provisioning/). The `pubsubplus` chart deployment will create a Persistent Volume Claim (PVC) specifying the size and the Storage Class of the requested volume and a Persistent Volume (PV) that meets the requirements will be allocated. Both the PVC and PV names will be linked to the deployment's name. When deleting the event broker pod(s) or even the entire deployment, the PVC and the allocated PV will not be deleted, so potentially complex configuration is preserved. They will be re-mounted and reused with the existing configuration when a new pod starts (controlled by the StatefulSet, automatically matched to the old pod even in an HA deployment) or deployment with the same as the old name is started. Explicitly delete a PVC if no longer needed, which will delete the corresponding PV - refer to [Deleting a Deployment](#deleting-a-deployment).
-
-Instead of using a storage class, the `pubsubplus` chart also allows you describe how to assign storage by adding your own YAML fragment in the `storage.customVolumeMount` parameter. The fragment is inserted for the `data` volume in the `{spec.template.spec.volumes}` section of the ConfigMap. Note that in this case the `storage.useStorageClass` parameter is ignored.
-
-Followings are examples of how to specify parameter values in common use cases:
-
-#### Using the default or an existing storage class
-
-Set the `storage.useStorageClass` parameter to use a particular storage class or leave this parameter to default undefined to allocate from your platform's "default" storage class - ensure it exists.
-```bash
-# Check existing storage classes
-kubectl get storageclass
-```
-
-#### Creating a new storage class
-
-Create a [specific storage class](//kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) if no existing storage class meets your needs. Refer to your Kubernetes environment's documentation if a StorageClass needs to be created or to understand the differences if there are multiple options. Example:
-```yaml
-# AWS fast storage class example
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: fast
-provisioner: kubernetes.io/aws-ebs
-parameters:
- type: io1
- fsType: xsf
-```
-
-If using NFS, or generally if allocating from a defined Kubernetes [Persistent Volume](//kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes), specify a `storageClassName` in the PV manifest as in this NFS example, then set the `storage.useStorageClass` parameter to the same:
-```yaml
-# Persistent Volume example
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: pv0003
-spec:
- storageClassName: nfs
- capacity:
- storage: 5Gi
- volumeMode: Filesystem
- accessModes:
- - ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
- mountOptions:
- - hard
- - nfsvers=4.1
- nfs:
- path: /tmp
- server: 172.17.0.2
-```
-> Note: NFS is currently supported for development and demo purposes. If using NFS also set the `storage.slow` parameter to 'true'.
-
-
-#### Using an existing PVC (Persistent Volume Claim)
-
-You can to use an existing PVC with its associated PV for storage, but it must be taken into account that the deployment will try to use any existing, potentially incompatible, configuration data on that volume.
-
-Provide this custom YAML fragment in `storage.customVolumeMount`:
-
-```yaml
- customVolumeMount: |
- persistentVolumeClaim:
- claimName: existing-pvc-name
-```
-
-#### Using a pre-created provider-specific volume
-
-The PubSub+ Software Event Broker Kubernetes deployment is expected to work with all [types of volumes](//kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes ) your environment supports. In this case provide the specifics on mounting it in a custom YAML fragment in `storage.customVolumeMount`.
-
-The following shows how to implement the [gcePersistentDisk example](//kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk); note how the portion of the pod manifest example after `{spec.volumes.name}` is specified:
-```yaml
- customVolumeMount: |
- gcePersistentDisk:
- pdName: my-data-disk
- fsType: ext4
-```
-
-
-Another example is using [hostPath](//kubernetes.io/docs/concepts/storage/volumes/#hostpath):
-```yaml
- customVolumeMount: |
- hostPath:
- # directory location on host
- path: /data
- # this field is optional
- type: Directory
-```
-
-### Exposing the PubSub+ Software Event Broker Services
-
-[PubSub+ services](//docs.solace.com/Configuring-and-Managing/Default-Port-Numbers.htm#Software) can be exposed through one of the [Kubernetes service types](//kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) by specifying the `service.type` parameter:
-
-* LoadBalancer - an external load balancer (default)
-* NodePort
-* ClusterIP
-
-To support [Internal load balancers](//kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer), provider-specific service annotation may be added through defining the `service.annotations` parameter.
-
-The `service.ports` parameter defines the services exposed. It specifies the event broker `containerPort` that provides the service and the mapping to the `servicePort` where the service can be accessed when using LoadBalancer or ClusterIP. Note that there is no control over which port services are mapped when using NodePort.
-
-When using Helm to initiate a deployment, notes will be provided on the screen about how to obtain the service addresses and ports specific to your deployment - follow the "Services access" section of the notes.
-
-A deployment is ready for service requests when there is a Solace pod that is running, `1/1` ready, and the pod's label is "active=true." The exposed `pubsubplus` service will forward traffic to that active event broker node. **Important**: service means here [Guaranteed Messaging level of Quality-of-Service (QoS) of event messages persistence](//docs.solace.com/PubSub-Basics/Guaranteed-Messages.htm). Messaging traffic will not be forwarded if service level is degraded to [Direct Messages](//docs.solace.com/PubSub-Basics/Direct-Messages.htm) only.
-
-#### Using pod label "active" to identify the active event broker node
-
-This section provides more information about what is required to achieve the correct label for the pod hosting the active event broker node.
-
-Use `kubectl get pods --show-labels` to check for the status of the "active" label. In a stable deployment, one of the message routing nodes with ordinal 0 or 1 shall have the label `active=true`. You can find out if there is an issue by [checking events](#viewing-events) for related ERROR reported.
-
-This label is set by the `readiness_check.sh` script in `pubsubplus/templates/solaceConfigMap.yaml`, triggered by the StatefulSet's readiness probe. For this to happen the followings are required:
-
-- the Solace pods must be able to communicate with each-other at port 8080 and internal ports using the Service-Discovery service.
-- the Kubernetes service account associated with the Solace pod must have sufficient rights to patch the pod's label when the active event broker is service ready
-- the Solace pods must be able to communicate with the Kubernetes API at `kubernetes.default.svc.cluster.local` at port $KUBERNETES_SERVICE_PORT. You can find out the address and port by [SSH into the pod](#ssh-access-to-individual-message-brokers).
-
-### Enabling use of TLS to access broker services
-
-#### Setting up TLS
-
-Default deployment does not have TLS over TCP enabled to access broker services. Although the exposed `service.ports` include ports for secured TCP, only the insecure ports can be used by default.
-
-To enable accessing services over TLS a server key and certificate must be configured on the broker.
-
-It is assumed that a provider out of scope of this document will be used to create a server key and certificate for the event broker, that meet the [requirements described in the Solace Documentation](https://docs.solace.com/Configuring-and-Managing/Managing-Server-Certs.htm). If the server key is password protected it shall be transformed to an unencrypted key, e.g.: `openssl rsa -in encryedprivate.key -out unencryed.key`.
-
-The server key and certificate must be packaged in a Kubernetes secret, for example by [creating a TLS secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets). Example:
-```
-kubectl create secret tls --key="" --cert=""
-```
-
-This secret name and related parameters shall be specified when deploying the PubSub+ Helm chart:
-```
-tls:
- enabled: true # set to false by default
- serverCertificatesSecret: # replace by the actual name
- certFilename: # optional, default if not provided: tls.crt
- certKeyFilename: # optional, default if not provided: tls.key
-```
-
-> Note: ensure filenames are matching the files reported from running `kubectl describe secret `.
-
-Here is an example new deployment with TLS enabled using default `certFilename` and `certKeyFilename`:
-```
-helm install my-release solacecharts/pubsubplus \
---set tls.enabled=true,tls.serverCertificatesSecret=
-```
-
-Important: it is not possible to update an existing deployment to enable TLS that has been created without TLS enabled, by a simply using the [modify deployment](#modifying-or-upgrading-a-deployment) procedure. In this case, for the first time, certificates need to be [manually loaded and set up](//docs.solace.com/Configuring-and-Managing/Managing-Server-Certs.htm) on each broker node. After that it is possible to use `helm upgrade` with a secret specified.
-
-#### Rotating the server key
-
-In the event the server key or certificate need to be rotated a new Kubernetes secret must be created, which may require deleting and recreating the old secret if using the same name.
-
-Next, if using the same secret name, the broker Pods need to be restarted, one at a time waiting to reach `1/1` availability before continuing on the next one: starting with the Monitor (ordinal -2), followed by the node in backup role with `active=false` label, and finally the third node. If using a new secret name, the [modify deployment](#modifying-or-upgrading-a-deployment) procedure can be used and an automatic rolling update will follow these steps restarting the nodes one at a time.
-
-> Note: a pod restart will result in provisioning the server certificate from the secret again so it will revert back from any other server certificate that may have been provisioned on the broker through other mechanism.
-
-### The PubSub+ Software Event Broker Docker image
-
-The `image.repository` and `image.tag` parameters combined specify the PubSub+ Software Event Broker Docker image to be used for the deployment. They can either point to an image in a public or a private Docker container registry.
-
-#### Using a public registry
-
-The default values are `solace/solace-pubsub-standard/` and `latest`, which is the free PubSub+ Software Event Broker Standard Edition from the [public Solace Docker Hub repo](//hub.docker.com/r/solace/solace-pubsub-standard/). It is generally recommended to set `image.tag` to a specific build for traceability purposes.
-
-#### Using private registries
-
-The following steps are applicable if using a private Docker container registry (e.g.: GCR, ECR or Harbor):
-1. Get the Solace PubSub+ event broker Docker image tar.gz archive
-2. Load the image into the private Docker registry
-
-To get the PubSub+ Software Event Broker Docker image URL, go to the Solace Developer Portal and download the Solace PubSub+ Software Event Broker as a **docker** image or obtain your version from Solace Support.
-
-| PubSub+ Software Event Broker Standard
Docker Image | PubSub+ Software Event Broker Enterprise Evaluation Edition
Docker Image
-| :---: | :---: |
-| Free, up to 1k simultaneous connections,
up to 10k messages per second | 90-day trial version, unlimited |
-| [Download Standard docker image](http://dev.solace.com/downloads/ ) | [Download Evaluation docker image](http://dev.solace.com/downloads#eval ) |
-
-To load the Solace PubSub+ Software Event Broker Docker image into a private Docker registry, follow the general steps below; for specifics, consult the documentation of the registry you are using.
-
-* Prerequisite: local installation of [Docker](//docs.docker.com/get-started/ ) is required
-* Login to the private registry:
-```sh
-sudo docker login ...
-```
-* First, load the image to the local docker registry:
-```sh
-# Options a or b depending on your Docker image source:
-## Option a): If you have a local tar.gz Docker image file
-sudo docker load -i .tar.gz
-## Option b): You can use the public Solace Docker image, such as from Docker Hub
-sudo docker pull solace/solace-pubsub-standard:latest # or specific
-#
-# Verify the image has been loaded and note the associated "IMAGE ID"
-sudo docker images
-```
-* Tag the image with a name specific to the private registry and tag:
-```sh
-sudo docker tag //:
-```
-* Push the image to the private registry
-```sh
-sudo docker push //:
-```
-
-Note that additional steps may be required if using signed images.
-
-#### Using ImagePullSecrets for signed images
-
-An additional ImagePullSecret may be required if using signed images from a private Docker registry, e.g.: Harbor.
-
-Here is an example of creating an ImagePullSecret. Refer to your registry's documentation for the specific details of use.
-
-```sh
-kubectl create secret docker-registry --dockerserver= \
- --docker-username= --docker-password= \
- --docker-email=
-```
-
-Then set the `image.pullSecretName` chart value to ``.
-
-### Security considerations
-
-#### Using Security Context
-
-The event broker container already runs in non-privileged mode.
-
-If `securityContext.enabled` is `true` (default) then the `securityContext.fsGroup` and `securityContext.runAsUser` settings define [the pod security context](//kubernetes.io/docs/tasks/configure-pod-container/security-context/).
-
-If other settings control `fsGroup` and `runAsUser`, e.g: when using a [PodSecurityPolicy](//kubernetes.io/docs/concepts/policy/pod-security-policy/) or an Openshift "restricted" SCC, `securityContext.enabled` shall be set to `false` or ensure specified values do not conflict with the policy settings.
-
-#### Enabling pod label "active" in a tight security environment
-
-Services require [pod label "active"](#using-pod-label-active-to-identify-the-active-event-broker-node) of the serving event broker.
-* In a controlled environment it may be necessary to add a [NetworkPolicy](//kubernetes.io/docs/concepts/services-networking/network-policies/ ) to enable [required communication](#using-pod-label-active-to-identify-the-active-event-broker-node).
-
-#### Securing TLS server key and certificate
-
-Using secrets for TLS server keys and certificates follows Kubernetes recommendations, however, particularly in a production environment, additional steps are required to ensure only authorized access to these secrets following Kubernetes industry best practices, including setting tight RBAC permissions and fixing possible security holes.
-
-## Deployment Prerequisites
-
-### Platform and tools setup
-
-#### Install the `kubectl` command-line tool
-
-Refer to [these instructions](//kubernetes.io/docs/tasks/tools/install-kubectl/) to install `kubectl` if your environment does not already provide this tool or equivalent (like `oc` in OpenShift).
-
-#### Perform any necessary Kubernetes platform-specific setup
-
-This refers to getting your platform ready either by creating a new one or getting access to an existing one. Supported platforms include but are not restricted to:
-* Amazon EKS
-* Azure AKS
-* Google GCP
-* OpenShift
-* MiniKube
-* VMWare PKS
-
-Check your platform running the `kubectl get nodes` command from your command-line client.
-
-#### Install and setup the Helm package manager
-
-The event broker can be deployed using Helm v3.
-> Note: For Helm v2 support refer to [earlier versions of this quickstart](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/releases).
-
-The Helm v3 executable is available from https://github.com/helm/helm/releases . Further documentation is available from https://helm.sh/.
-
-```shell
-curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
-```
-
-## Deployment steps
-
-As discussed in the [Overview](#overview), two types of deployments will be described:
-* Deployment steps using Helm, as package manager
-* Alternative Deployment with generating templates for the Kubernetes `kubectl` tool
-
-### Deployment steps using Helm
-
-The recommended way is to make use of published pre-packaged PubSub+ charts from Solace' public repo and customizing your deployment through [available chart parameters](/pubsubplus/README.md).
-
-Add or refresh a local Solace `solacecharts` repo:
-```bash
-# Add new "solacecharts" repo
-helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
-# Refresh if needed, e.g.: to use a recently published chart version
-helm repo update solacecharts
-
-# Install from the repo
-helm install my-release solacecharts/pubsubplus
-```
-
-There are three Helm chart variants available with default small-size configurations:
-1. `pubsubplus-dev` - PubSub+ Software Event Broker for Developers (standalone)
-2. `pubsubplus` - PubSub+ Software Event Broker standalone, supporting 100 connections
-3. `pubsubplus-ha` - PubSub+ Software Event Broker HA, supporting 100 connections
-
-Customization options are described in the [PubSub+ Software Event Broker Helm Chart](/pubsubplus/README.md#configuration) reference.
-
-Also, refer to the [quick start guide](/README.md) for additional deployment details.
-
-**More customization options**
-
-If more customization than just using Helm parameters is required, you can create your own fork so templates can be edited:
-```bash
-# This creates a local directory from the published templates
-helm fetch solacecharts/pubsubplus --untar
-# Use the Helm chart from this directory
-helm install ./pubsubplus
-```
-> Note: it is encouraged to raise a [GitHub issue](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/issues/new) to possibly contribute your enhancements back to the project.
-
-### Alternative Deployment with generating templates for the Kubernetes `kubectl` tool
-
-This method will first generate installable Kubernetes templates from this project's Helm charts, then the templates can be installed using the Kubectl tool.
-
-Note that later sections of this document about modifying, upgrading or deleting a Deployment using the Helm tool do not apply.
-
-**Step 1: Generate Kubernetes templates for Solace event broker deployment**
-
-1) Ensure Helm is locally installed.
-
-2) Add or refresh a local Solace `solacecharts` repo:
-```bash
-# Add new "solacecharts" repo
-helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
-# Refresh if needed, e.g.: to use a recently published chart version
-helm repo update solacecharts
-```
-
-3) Generate the templates:
-
-First, consider if any [configurations](/pubsubplus/README.md#configuration) are required.
-If this is the case then you can add overrides as additional `--set ...` parameters to the `helm template` command, or use an override YAML file.
-
-```sh
-# Create local copy
-helm fetch solacecharts/pubsubplus --untar
-# Create location for the generated templates
-mkdir generated-templates
-# In one of next sample commands replace my-release to the desired release name
-# a) Using all defaults:
-helm template my-release --output-dir ./generated-templates ./pubsubplus
-# b) Example with configuration using --set
-helm template my-release --output-dir ./generated-templates \
- --set solace.redundancy=true \
- ./pubsubplus
-# c) Example with configuration using --set
-helm template my-release --output-dir ./generated-templates \
- -f my-values.yaml \
- ./pubsubplus
-
-```
-The generated set of templates are now available in the `generated-templates` directory.
-
-**Step 2: Deploy the templates on the target system**
-
-Assumptions: `kubectl` is deployed and configured to point to your Kubernetes cluster
-
-1) Optionally, copy the `generated-templates` directory with contents if this is on a different host
-
-2) Initiate the deployment:
-```bash
-kubectl apply --recursive -f ./generated-templates/pubsubplus
-```
-Wait for the deployment to complete, which is then ready to use.
-
-3) To delete the deployment, execute:
-```bash
-kubectl delete --recursive -f ./generated-templates/pubsubplus
-```
-
-
-
-## Validating the Deployment
-
-Now you can validate your deployment on the command line. In this example an HA configuration is deployed with pod/XXX-XXX-pubsubplus-0 being the active event broker/pod. The notation XXX-XXX is used for the unique release name, e.g: "my-release".
-
-```sh
-prompt:~$ kubectl get statefulsets,services,pods,pvc,pv
-NAME READY AGE
-statefulset.apps/my-release-pubsubplus 3/3 13m
-
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-service/kubernetes ClusterIP 10.92.0.1 443/TCP 14d
-service/my-release-pubsubplus LoadBalancer 10.92.13.40 34.67.66.30 2222:30197/TCP,8080:30343/TCP,1943:32551/TCP,55555:30826/TCP,55003:30770/TCP,55443:32583/TCP,8008:32689/TCP,1443:32460/TCP,5672:31960/TCP,1883:32112/TCP,9000:30848/TCP 13m
-service/my-release-pubsubplus-discovery ClusterIP None 8080/TCP,8741/TCP,8300/TCP,8301/TCP,8302/TCP 13m
-
-NAME READY STATUS RESTARTS AGE
-pod/my-release-pubsubplus-0 1/1 Running 0 13m
-pod/my-release-pubsubplus-1 1/1 Running 0 13m
-pod/my-release-pubsubplus-2 1/1 Running 0 13m
-
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
-persistentvolumeclaim/data-my-release-pubsubplus-0 Bound pvc-6b0cd358-30c4-11ea-9379-42010a8000c7 30Gi RWO standard 13m
-persistentvolumeclaim/data-my-release-pubsubplus-1 Bound pvc-6b14bc8a-30c4-11ea-9379-42010a8000c7 30Gi RWO standard 13m
-persistentvolumeclaim/data-my-release-pubsubplus-2 Bound pvc-6b24b2aa-30c4-11ea-9379-42010a8000c7 30Gi RWO standard 13m
-
-NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
-persistentvolume/pvc-6b0cd358-30c4-11ea-9379-42010a8000c7 30Gi RWO Delete Bound default/data-my-release-pubsubplus-0 standard 13m
-persistentvolume/pvc-6b14bc8a-30c4-11ea-9379-42010a8000c7 30Gi RWO Delete Bound default/data-my-release-pubsubplus-1 standard 13m
-persistentvolume/pvc-6b24b2aa-30c4-11ea-9379-42010a8000c7 30Gi RWO Delete Bound default/data-my-release-pubsubplus-2 standard 13m
-
-
-prompt:~$ kubectl describe service my-release-pubsubplus
-Name: my-release-pubsubplus
-Namespace: test
-Labels: app.kubernetes.io/instance=my-release
- app.kubernetes.io/managed-by=Tiller
- app.kubernetes.io/name=pubsubplus
- helm.sh/chart=pubsubplus-1.0.0
-Annotations:
-Selector: active=true,app.kubernetes.io/instance=my-release,app.kubernetes.io/name=pubsubplus
-Type: LoadBalancer
-IP: 10.100.200.41
-LoadBalancer Ingress: 34.67.66.30
-Port: ssh 2222/TCP
-TargetPort: 2222/TCP
-NodePort: ssh 30197/TCP
-Endpoints: 10.28.1.20:2222
-:
-:
-```
-
-Generally, all services including management and messaging are accessible through a Load Balancer. In the above example `34.67.66.30` is the Load Balancer's external Public IP to use.
-
-> Note: When using MiniKube, there is no integrated Load Balancer. For a workaround, execute `minikube service XXX-XXX-pubsubplus` to expose the services. Services will be accessible directly using mapped ports instead of direct port access, for which the mapping can be obtained from `kubectl describe service XXX-XXX-pubsubplus`.
-
-### Gaining admin access to the event broker
-
-There are [multiple management tools](//docs.solace.com/Management-Tools.htm ) available. The WebUI is the recommended simplest way to administer the event broker for common tasks.
-
-#### Admin Password
-
-A random admin password will be generated if it has not been provided at deployment using the `solace.usernameAdminPassword` parameter. Refer to the information from `helm status` on how to retrieve it.
-
-**Important:** Every time `helm install` or `helm upgrade` is called a new admin password will be generated, which may break an existing deployment. Therefore ensure to always provide the password from the initial deployment as `solace.usernameAdminPassword=` parameter to subsequent `install` and `upgrade` commands.
-
-#### WebUI, SolAdmin and SEMP access
-
-Use the Load Balancer's external Public IP at port 8080 to access these services.
-
-#### Solace CLI access
-
-If you are using a single event broker and are used to working with a CLI event broker console access, you can SSH into the event broker as the `admin` user using the Load Balancer's external Public IP:
-
-```sh
-
-$ssh -p 2222 admin@35.202.131.158
-Solace PubSub+ Standard
-Password:
-
-Solace PubSub+ Standard Version 9.4.0.105
-
-The Solace PubSub+ Standard is proprietary software of
-Solace Corporation. By accessing the Solace PubSub+ Standard
-you are agreeing to the license terms and conditions located at
-//www.solace.com/license-software
-
-Copyright 2004-2019 Solace Corporation. All rights reserved.
-
-To purchase product support, please contact Solace at:
-//dev.solace.com/contact-us/
-
-Operating Mode: Message Routing Node
-
-XXX-XXX-pubsubplus-0>
-```
-
-If you are using an HA deployment, it is better to access the CLI through the Kubernetes pod and not directly via SSH.
-
-* Loopback to SSH directly on the pod
-
-```sh
-kubectl exec -it XXX-XXX-pubsubplus-0 -- bash -c "ssh -p 2222 admin@localhost"
-```
-
-* Loopback to SSH on your host with a port-forward map
-
-```sh
-kubectl port-forward XXX-XXX-pubsubplus-0 62222:2222 &
-ssh -p 62222 admin@localhost
-```
-
-This can also be mapped to individual event brokers in the deployment via port-forward:
-
-```
-kubectl port-forward XXX-XXX-pubsubplus-0 8081:8080 &
-kubectl port-forward XXX-XXX-pubsubplus-1 8082:8080 &
-kubectl port-forward XXX-XXX-pubsubplus-2 8083:8080 &
-```
-
-#### SSH access to individual event brokers
-
-For direct access, use:
-
-```sh
-kubectl exec -it XXX-XXX-pubsubplus-<pod-ordinal> -- bash
-```
-
-### Testing data access to the event broker
-
-To test data traffic though the newly created event broker instance, visit the Solace Developer Portal [APIs & Protocols](//www.solace.dev/ ). Under each option there is a Publish/Subscribe tutorial that will help you get started and provide the specific default port to use.
-
-Use the external Public IP to access the deployment. If a port required for a protocol is not opened, refer to the [Modification example](#modification-example) how to open it up.
-
-## Troubleshooting
-
-### General Kubernetes troubleshooting hints
-https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application/
-
-### Checking the reason for failed resources
-
-Run `kubectl get statefulsets,services,pods,pvc,pv` to get an understanding of the state, then drill down to get more information on a failed resource to reveal possible Kubernetes resourcing issues, e.g.:
-```sh
-kubectl describe pvc
-```
-
-### Viewing logs
-
-Detailed logs from the currently running container in a pod:
-```sh
-kubectl logs XXX-XXX-pubsubplus-0 -f # use -f to follow live
-```
-
-It is also possible to get the logs from a previously terminated or failed container:
-```sh
-kubectl logs XXX-XXX-pubsubplus-0 -p
-```
-
-Filtering on bringup logs (helps with initial troubleshooting):
-```sh
-kubectl logs XXX-XXX-pubsubplus-0 | grep [.]sh
-```
-
-### Viewing events
-
-Kubernetes collects [all events for a cluster in one pool](//kubernetes.io/docs/tasks/debug-application-cluster/events-stackdriver ). This includes events related to the PubSub+ deployment.
-
-It is recommended to watch events when creating or upgrading a Solace deployment. Events clear after about an hour. You can query all available events:
-
-```sh
-kubectl get events -w # use -w to watch live
-```
-
-### PubSub+ Software Event Broker troubleshooting
-
-#### Pods stuck in not enough resources
-
-If pods stay in pending state and `kubectl describe pods` reveals there are not enough memory or CPU resources, check the [resource requirements of the targeted scaling tier](#cpu-and-memory-requirements) of your deployment and ensure adequate node resources are available.
-
-#### Pods stuck in no storage
-
-Pods may also stay in pending state because [storage requirements](#storage) cannot be met. Check `kubectl get pv,pvc`. PVCs and PVs should be in bound state and if not then use `kubectl describe pvc` for any issues.
-
-Unless otherwise specified, a default storage class must be available for default PubSub+ deployment configuration.
-```bash
-kubectl get storageclasses
-```
-
-#### Pods stuck in CrashLoopBackoff, Failed or Not Ready
-
-Pods stuck in CrashLoopBackoff, or Failed, or Running but not Ready "active" state, usually indicate an issue with available Kubernetes node resources or with the container OS or the event broker process start.
-
-* Try to understand the reason following earlier hints in this section.
-* Try to recreate the issue by deleting and then reinstalling the deployment - ensure to remove related PVCs if applicable as they would mount volumes with existing, possibly outdated or incompatible database - and watch the [logs](#viewing-logs) and [events](#viewing-events) from the beginning. Look for ERROR messages preceded by information that may reveal the issue.
-
-#### No Pods listed
-
-If no pods are listed related to your deployment check the StatefulSet for any clues:
-```
-kubectl describe statefulset my-release-pubsubplus
-```
-
-#### Security constraints
-
-Your Kubernetes environment's security constraints may also impact successful deployment. Review the [Security considerations](#security-considerations) section.
-
-## Modifying or upgrading a Deployment
-
-Use the `helm upgrade` command to upgrade/modify the event broker deployment: request the required modifications to the chart by passing the new/changed parameters or providing an upgrade `<values-file>` YAML file. When chaining multiple `-f <values-file>` options to Helm, the override priority will be given to the last (right-most) file specified.
-
-For both version upgrade and modifications, the "RollingUpdate" strategy of the Kubernetes StatefulSet applies: pods in the StatefulSet are restarted with new values in reverse order of ordinals, which means for PubSubPlus first the monitoring node (ordinal 2), then backup (ordinal 1) and finally the primary node (ordinal 0).
-
-For the next examples, assume a deployment has been created with some initial overrides for a development HA cluster:
-```bash
-helm install my-release solacecharts/pubsubplus --set solace.size=dev,solace.redundancy=true
-```
-
-#### Getting the currently used parameter values
-
-Currently used parameter values are the default chart parameter values overlayed with value-overrides.
-
-To get the default chart parameter values, check `helm show values solacecharts/pubsubplus`.
-
-To get the current value-overrides, execute:
-```
-$ helm get values my-release
-USER-SUPPLIED VALUES:
-solace:
- redundancy: true
- size: dev
-```
-**Important:** this may not show, but be aware of an additional non-default parameter:
-```
-solace:
- usernameAdminPassword: jMzKoW39zz # The value is just an example
-```
-This has been generated at the initial deployment if not specified and must be used henceforth for all change requests, to keep the same. See related note in the [Admin Password section](#admin-password).
-
-#### Upgrade example
-
-To **upgrade** the version of the event broker running within a Kubernetes cluster:
-
-- Add the new version of the event broker to your container registry, then
-- Either:
- * Set the new image in the Helm upgrade command, also ensure to include the original overrides:
-```bash
-helm upgrade my-release solacecharts/pubsubplus \
- --set solace.size=dev,solace.redundancy=true,solace.usernameAdminPassword=jMzKoW39zz \
- --set image.repository=//solace-pubsub-standard,image.tag=NEW.VERSION.XXXXX,image.pullPolicy=IfNotPresent
-```
- * Or create a simple `version-upgrade.yaml` file and use that to upgrade the release:
-```bash
-tee ./version-upgrade.yaml <<-EOF # include original and new overrides
-solace:
- redundancy: true
- size: dev
- usernameAdminPassword: jMzKoW39zz
-image:
- repository: //solace-pubsub-standard
- tag: NEW.VERSION.XXXXX
- pullPolicy: IfNotPresent
-EOF
-helm upgrade my-release solacecharts/pubsubplus -f version-upgrade.yaml
-```
-> Note: upgrade will begin immediately, in the order of pod 2, 1 and 0 (Monitor, Backup, Primary) taken down for upgrade in an HA deployment. This will affect running event broker instances, result in potentially multiple failovers and requires connection-retries configured in the client.
-
-#### Modification example
-
-Similarly, to **modify** deployment parameters, you need to pass modified value-overrides. Passing the same value-overrides to upgrade will result in no change.
-
-In this example we will add the AMQP encrypted (TLS) port to the loadbalancer - it is not included by default.
-
-First [look up](//docs.solace.com/Configuring-and-Managing/Default-Port-Numbers.htm#Software) the port number for AMQP TLS: the required port is 5671.
-
-Next, create an update file with the additional contents:
-```bash
-tee ./port-update.yaml <<-EOF # :
-service:
- ports:
- - servicePort: 5671
- containerPort: 5671
- protocol: TCP
- name: amqptls
-EOF
-```
-
-Now upgrade the deployment, passing the changes. This time the original `--set` value-overrides are combined with the override file:
-```bash
-helm upgrade my-release solacecharts/pubsubplus \
- --set solace.size=dev,solace.redundancy=true,solace.usernameAdminPassword=jMzKoW39zz \
- --values port-update.yaml
-```
-
-## Re-installing a Deployment
-
-If using *persistent* storage broker data will not be deleted upon `helm delete`.
-
-In this case the deployment can be reinstalled and continue from the point before the `helm delete` command was executed by running `helm install` again, using the **same** release name and parameters as the previous run. This includes explicitly providing the same admin password as before.
-
-```
-# Initial deployment:
-helm install my-release solacecharts/pubsubplus --set solace.size=dev,solace.redundancy=true
-# This will auto-generate an admin password
-# Retrieve the admin password, follow instructions from the output of "helm status", section Admin credentials
-# Delete this deployment
-helm delete my-release
-# Reinstall deployment, assuming persistent storage. Notice the admin password specified
-helm install my-release solacecharts/pubsubplus --set solace.size=dev,solace.redundancy=true,solace.usernameAdminPassword=jMzKoW39zz
-# Original deployment is now back up
-```
-
-## Deleting a Deployment
-
-Use Helm to delete a deployment, also called a release:
-```
-helm delete my-release
-```
-
-Check what has remained from the deployment:
-```
-kubectl get statefulsets,services,pods,pvc,pv
-```
-
-> Note: Helm will not clean up PVCs and related PVs. Use `kubectl delete` to delete PVCs if associated data is no longer required.
-
-
-
-
-
-
-
-
+# Solace PubSub+ Software Event Broker on Kubernetes Deployment Documentation
+
+This document provides detailed information for deploying Solace PubSub+ Software Event Broker on Kubernetes.
+
+* For a hands-on quick start, refer to the [Quick Start guide](/README.md).
+* For the `pubsubplus` Helm chart configuration options, refer to the [PubSub+ Software Event Broker Helm Chart Reference](/pubsubplus/README.md).
+
+This document is applicable to any platform provider supporting Kubernetes.
+
+Contents:
+ * [**The Solace PubSub+ Software Event Broker**](#the-solace-pubsub-software-event-broker)
+ * [**Overview**](#overview)
+ * [**PubSub+ Event Broker Deployment Considerations**](#pubsub-software-event-broker-deployment-considerations)
+ + [Deployment scaling](#deployment-scaling)
+ - [Simplified vertical scaling](#simplified-vertical-scaling)
+ - [Comprehensive vertical scaling](#comprehensive-vertical-scaling)
+ - [Reducing resource requirements of Monitoring Nodes in an HA deployment](#reducing-resource-requirements-of-monitoring-nodes-in-an-ha-deployment)
+ + [Disk Storage](#disk-storage)
+ - [Allocating smaller storage to Monitor pods in an HA deployment](#allocating-smaller-storage-to-monitor-pods-in-an-ha-deployment)
+ - [Using the default or an existing storage class](#using-the-default-or-an-existing-storage-class)
+ - [Creating a new storage class](#creating-a-new-storage-class)
+ - [Using an existing PVC (Persistent Volume Claim)](#using-an-existing-pvc-persistent-volume-claim-)
+ - [Using a pre-created provider-specific volume](#using-a-pre-created-provider-specific-volume)
+ + [Exposing the PubSub+ Event Broker Services](#exposing-the-pubsub-software-event-broker-services)
+ - [Specifying Service Type](#specifying-service-type)
+ - [Using Ingress to access event broker services](#using-ingress-to-access-event-broker-services)
+ * [Configuration examples](#configuration-examples)
+ * [HTTP, no TLS](#http-no-tls)
+ * [HTTPS with TLS terminate at ingress](#https-with-tls-terminate-at-ingress)
+ * [HTTPS with TLS re-encrypt at ingress](#https-with-tls-re-encrypt-at-ingress)
+ * [General TCP over TLS with passthrough to broker](#general-tcp-over-tls-with-passthrough-to-broker)
+ - [Using pod label "active" to identify the active event broker node](#using-pod-label-active-to-identify-the-active-event-broker-node)
+ + [Enabling use of TLS to access broker services](#enabling-use-of-tls-to-access-broker-services)
+ - [Setting up TLS](#setting-up-tls)
+ - [Rotating the server key](#rotating-the-server-key)
+ + [The PubSub+ Docker image](#the-pubsub-software-event-broker-docker-image)
+ - [Using a public registry](#using-a-public-registry)
+ - [Using private registries](#using-private-registries)
+ - [Using ImagePullSecrets for signed images](#using-imagepullsecrets-for-signed-images)
+ + [Security considerations](#security-considerations)
+ - [Using Security Context](#using-security-context)
+ - [Enabling pod label "active" in a tight security environment](#enabling-pod-label-active-in-a-tight-security-environment)
+ * [**Deployment Prerequisites**](#deployment-prerequisites)
+ + [Platform and tools setup](#platform-and-tools-setup)
+ - [Install the `kubectl` command-line tool](#install-the-kubectl-command-line-tool)
+ - [Perform any necessary Kubernetes platform-specific setup](#perform-any-necessary-kubernetes-platform-specific-setup)
+ - [Install and setup the Helm package manager](#install-and-setup-the-helm-package-manager)
+ * [**Deployment steps**](#deployment-steps)
+ + [Deployment steps using Helm](#deployment-steps-using-helm)
+ + [Alternative Deployment with generating templates for the Kubernetes `kubectl` tool](#alternative-deployment-with-generating-templates-for-the-kubernetes-kubectl-tool)
+ * [**Validating the Deployment**](#validating-the-deployment)
+ + [Gaining admin access to the event broker](#gaining-admin-access-to-the-event-broker)
+ - [Admin Password](#admin-password)
+ - [WebUI, SolAdmin and SEMP access](#webui-soladmin-and-semp-access)
+ - [Solace CLI access](#solace-cli-access)
+ - [SSH access to individual event brokers](#ssh-access-to-individual-event-brokers)
+ + [Testing data access to the event broker](#testing-data-access-to-the-event-broker)
+ * [**Troubleshooting**](#troubleshooting)
+ + [Viewing logs](#viewing-logs)
+ + [Viewing events](#viewing-events)
+ + [PubSub+ Software Event Broker troubleshooting](#pubsub-software-event-broker-troubleshooting)
+ - [General Kubernetes troubleshooting hints](#general-kubernetes-troubleshooting-hints)
+ - [Pods stuck in not enough resources](#pods-stuck-in-not-enough-resources)
+ - [Pods stuck in no storage](#pods-stuck-in-no-storage)
+ - [Pods stuck in CrashLoopBackoff, Failed or Not Ready](#pods-stuck-in-crashloopbackoff-failed-or-not-ready)
+ - [No Pods listed](#no-pods-listed)
+ - [Security constraints](#security-constraints)
+ * [**Modifying or upgrading a Deployment**](#modifying-or-upgrading-a-deployment)
+ - [Upgrade example](#upgrade-example)
+ - [Modification example](#modification-example)
+ * [**Re-installing a Deployment**](#re-installing-a-deployment)
+ * [**Deleting a Deployment**](#deleting-a-deployment)
+
+
+
+## The Solace PubSub+ Software Event Broker
+
+The [PubSub+ Software Event Broker](https://solace.com/products/event-broker/) of the [Solace PubSub+ Platform](https://solace.com/products/platform/) efficiently streams event-driven information between applications, IoT devices and user interfaces running in the cloud, on-premises, and hybrid environments using open APIs and protocols like AMQP, JMS, MQTT, REST and WebSocket. It can be installed into a variety of public and private clouds, PaaS, and on-premises environments, and brokers in multiple locations can be linked together in an [event mesh](https://solace.com/what-is-an-event-mesh/) to dynamically share events across the distributed enterprise.
+
+## Overview
+
+This document assumes a basic understanding of [Kubernetes concepts](https://kubernetes.io/docs/concepts/).
+
+For an example deployment diagram, check out the [PubSub+ Event Broker on Google Kubernetes Engine (GKE) quickstart](https://github.com/SolaceProducts/pubsubplus-gke-quickstart#how-to-deploy-solace-pubsub-software-event-broker-onto-gke).
+
+Multiple YAML templates define the PubSub+ Kubernetes deployment with several parameters as deployment options. The templates are packaged as the `pubsubplus` [Helm chart](//helm.sh/docs/topics/charts/) to enable easy customization by only specifying the non-default parameter values, without the need to edit the template files.
+
+There are two deployment options described in this document:
+* The recommended option is to use the [Kubernetes Helm tool](https://github.com/helm/helm/blob/master/README.md), which can also manage your deployment's lifecycle, including upgrade and delete.
+* Another option is to generate a set of templates with customized values from the PubSub+ Helm chart and then use the Kubernetes native `kubectl` tool to deploy. The deployment will use the authorizations of the requesting user. However, in this case, Helm will not be able to manage your Kubernetes rollouts lifecycle.
+
+The next sections will provide details on the PubSub+ Helm chart, dependencies and customization options, followed by [deployment prerequisites](#deployment-prerequisites) and the actual [deployment steps](#deployment-steps).
+
+## PubSub+ Software Event Broker Deployment Considerations
+
+The following diagram illustrates the template organization used for the PubSub+ Deployment chart. Note that the minimum is shown in this diagram to give you some background regarding the relationships and major functions.
+![alt text](/docs/images/template_relationship.png "`pubsubplus` chart template relationship")
+
+The StatefulSet template controls the pods of a PubSub+ Software Event Broker deployment. It also mounts the scripts from the ConfigMap and the files from the Secrets and maps the event broker data directories to a storage volume through a StorageClass, if configured. The Service template provides the event broker services at defined ports. The Service-Discovery template is only used internally, so pods in a PubSub+ event broker redundancy group can communicate with each other in an HA setting.
+
+All the `pubsubplus` chart parameters are documented in the [PubSub+ Software Event Broker Helm Chart](/pubsubplus/README.md#configuration) reference.
+
+### Deployment scaling
+
+Solace PubSub+ Software Event Broker can be scaled vertically by specifying either:
+* `solace.size` - simplified scaling along the maximum number of client connections; or
+* `solace.systemScaling` - enables defining all scaling parameters and pod resources
+
+Depending on the `solace.redundancy` parameter, one event router pod is deployed in a single-node standalone deployment or three pods if deploying a [High-Availability (HA) group](//docs.solace.com/Overviews/SW-Broker-Redundancy-and-Fault-Tolerance.htm).
+
+Horizontal scaling is possible through [connecting multiple deployments](//docs.solace.com/Overviews/DMR-Overview.htm).
+
+#### Simplified vertical scaling
+
+The broker nodes are scaled by the [maximum number of concurrent client connections](//docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/System-Scaling-Parameters.htm#max-client-connections), controlled by the `solace.size` chart parameter.
+
+The broker container CPU and memory resource requirements are assigned according to the tier, and are summarized here from the [Solace documentation](//docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/System-Resource-Requirements.htm#res-req-container) for the possible `solace.size` parameter values:
+* `dev`: no guaranteed performance, minimum requirements: 1 CPU, 3.4 GiB memory
+* `prod100`: up to 100 connections, minimum requirements: 2 CPU, 3.4 GiB memory
+* `prod1k`: up to 1,000 connections, minimum requirements: 2 CPU, 6.4 GiB memory
+* `prod10k`: up to 10,000 connections, minimum requirements: 4 CPU, 12.2 GiB memory
+* `prod100k`: up to 100,000 connections, minimum requirements: 8 CPU, 30.3 GiB memory
+* `prod200k`: up to 200,000 connections, minimum requirements: 12 CPU, 51.4 GiB memory
+
+#### Comprehensive vertical scaling
+
+This option overrides simplified vertical scaling. It enables specifying each supported broker scaling parameter, currently:
+* "maxConnections", in `solace.systemScaling.maxConnections` parameter
+* "maxQueueMessages", in `solace.systemScaling.maxQueueMessages` parameter
+* "maxSpoolUsage", in `solace.systemScaling.maxSpoolUsage` parameter
+
+Additionally, CPU and memory must be sized and provided in `solace.systemScaling.cpu` and `solace.systemScaling.memory` parameters. Use the [Solace online System Resource Calculator](https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/System-Resource-Calculator.htm) to determine CPU and memory requirements for the selected scaling parameters.
+
+Note: beyond CPU and memory requirements, required storage size (see next section) also depends significantly on scaling. The calculator can be used to determine that as well.
+
+Also note, that specifying maxConnections, maxQueueMessages and maxSpoolUsage on initial deployment will overwrite the broker’s default values. On the other hand, doing the same using Helm upgrade on an existing deployment will not overwrite these values on brokers configuration, but it can be used to prepare (first step) for a manual scale up through CLI where these parameters can be actually changed (second step).
+
+#### Reducing resource requirements of Monitoring Nodes in an HA deployment
+
+The Kubernetes StatefulSet which controls the pods that make up a PubSub+ broker [deployment in an HA redundancy group](#deployment-scaling) does not distinguish between PubSub+ HA node types: it assigns the same CPU and memory resources to pods hosting worker and monitoring node types, even though monitoring nodes have minimal resource requirements.
+
+To address this, a "solace-pod-modifier" Kubernetes admission plugin is provided as part of this repo: when deployed it intercepts pod create requests and can set the lower resource requirements for broker monitoring nodes only.
+
+Also ensure to define the Helm chart parameter `solace.podModifierEnabled: true` to provide the necessary annotations to the PubSub+ broker pods, which acts as a "control switch" to enable the monitoring pod resource modification.
+
+Refer to the [Readme of the plugin](/solace-pod-modifier-admission-plugin/README.md) for details on how to activate and use it. Note: the plugin requires Kubernetes v1.16 or later.
+
+> Note: the use of the "solace-pod-modifier" Kubernetes admission plugin is not mandatory. If it is not activated or not working then the default behavior applies: monitoring nodes will have the same resource requirements as the worker nodes. If "solace-pod-modifier" is activated later, then as long as the monitoring node pods have the correct annotations they can be deleted and the reduced resources will apply after they are recreated.
+
+### Disk Storage
+
+The [PubSub+ deployment uses disk storage](//docs.solace.com/Configuring-and-Managing/Configuring-Storage.htm#Storage-) for logging, configuration, guaranteed messaging and other purposes, allocated from Kubernetes volumes.
+
+Broker versions prior to 9.12 required separate volumes mounted for each storage functionality, making up a [storage-group from individual storage-elements](https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/Configuring-Storage.htm). Versions 9.12 and later can have a single mount storage-group that will be divided up internally, but they still support the legacy mounting of storage-elements. It is recommended to set the parameter `storage.useStorageGroup=true` if using broker version 9.12 or later - do not use it on earlier versions.
+
+If using [simplified vertical scaling](#simplified-vertical-scaling), set following storage size (`storage.size` parameter) for the scaling tiers:
+* `dev`: no guaranteed performance: 5GB
+* `prod100`: up to 100 connections, 7GB
+* `prod1k`: up to 1,000 connections, 14GB
+* `prod10k`: up to 10,000 connections, 18GB
+* `prod100k`: up to 100,000 connections, 30GB
+* `prod200k`: up to 200,000 connections, 34GB
+
+If using [Comprehensive vertical scaling](#comprehensive-vertical-scaling), use the [calculator](https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/System-Resource-Calculator.htm) to determine storage size.
+
+Using a persistent storage is recommended, otherwise if pod-local storage is used data will be lost with the loss of a pod. The `storage.persistent` parameter is set to `true` by default.
+
+The `pubsubplus` chart supports allocation of new storage volumes or mounting volumes with existing data. To avoid data corruption ensure to allocate clean new volumes for new deployments.
+
+The recommended default allocation is to use Kubernetes [Storage Classes](//kubernetes.io/docs/concepts/storage/storage-classes/) utilizing [Dynamic Volume Provisioning](//kubernetes.io/docs/concepts/storage/dynamic-provisioning/). The `pubsubplus` chart deployment will create a Persistent Volume Claim (PVC) specifying the size and the Storage Class of the requested volume and a Persistent Volume (PV) that meets the requirements will be allocated. Both the PVC and PV names will be linked to the deployment's name. When deleting the event broker pod(s) or even the entire deployment, the PVC and the allocated PV will not be deleted, so potentially complex configuration is preserved. They will be re-mounted and reused with the existing configuration when a new pod starts (controlled by the StatefulSet, automatically matched to the old pod even in an HA deployment) or deployment with the same as the old name is started. Explicitly delete a PVC if no longer needed, which will delete the corresponding PV - refer to [Deleting a Deployment](#deleting-a-deployment).
+
+Instead of using a storage class, the `pubsubplus` chart also allows you describe how to assign storage by adding your own YAML fragment in the `storage.customVolumeMount` parameter. The fragment is inserted for the `data` volume in the `{spec.template.spec.volumes}` section of the ConfigMap. Note that in this case the `storage.useStorageClass` parameter is ignored.
+
+The following are examples of how to specify parameter values in common use cases:
+
+#### Allocating smaller storage to Monitor pods in an HA deployment
+
+When deploying PubSub+ in an HA redundancy group, monitoring nodes have minimal storage requirements compared to worker nodes. The default `storage.monitorStorageSize` Helm chart value enables setting and creating smaller storage for Monitor pods hosting monitoring nodes as a pre-install hook in an HA deployment (`solace.redundancy=true`), before larger storage would be automatically created. Note that this setting is effective for initial deployments only; it cannot be used to upgrade an existing deployment with storage already allocated for monitoring nodes. A workaround is to mark the Monitor pod storage for delete (it will not be deleted immediately, only after the Monitor pod has been deleted) then follow the steps to [recreate the deployment](#re-installing-a-deployment): `kubectl delete pvc <monitor-pod-pvc-name>`.
+
+#### Using the default or an existing storage class
+
+Set the `storage.useStorageClass` parameter to use a particular storage class or leave this parameter to default undefined to allocate from your platform's "default" storage class - ensure it exists.
+```bash
+# Check existing storage classes
+kubectl get storageclass
+```
+
+#### Creating a new storage class
+
+Create a [specific storage class](//kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) if no existing storage class meets your needs. Refer to your Kubernetes environment's documentation if a StorageClass needs to be created or to understand the differences if there are multiple options. Example:
+```yaml
+# AWS fast storage class example
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: fast
+provisioner: kubernetes.io/aws-ebs
+parameters:
+ type: io1
+  fsType: xfs
+```
+
+If using NFS, or generally if allocating from a defined Kubernetes [Persistent Volume](//kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes), specify a `storageClassName` in the PV manifest as in this NFS example, then set the `storage.useStorageClass` parameter to the same:
+```yaml
+# Persistent Volume example
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv0003
+spec:
+ storageClassName: nfs
+ capacity:
+ storage: 5Gi
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Recycle
+ mountOptions:
+ - hard
+ - nfsvers=4.1
+ nfs:
+ path: /tmp
+ server: 172.17.0.2
+```
+> Note: NFS is currently supported for development and demo purposes. If using NFS also set the `storage.slow` parameter to 'true'.
+
+
+#### Using an existing PVC (Persistent Volume Claim)
+
+You can use an existing PVC with its associated PV for storage, but it must be taken into account that the deployment will try to use any existing, potentially incompatible, configuration data on that volume.
+
+Provide this custom YAML fragment in `storage.customVolumeMount`:
+
+```yaml
+ customVolumeMount: |
+ persistentVolumeClaim:
+ claimName: existing-pvc-name
+```
+
+#### Using a pre-created provider-specific volume
+
+The PubSub+ Software Event Broker Kubernetes deployment is expected to work with all [types of volumes](//kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes ) your environment supports. In this case provide the specifics on mounting it in a custom YAML fragment in `storage.customVolumeMount`.
+
+The following shows how to implement the [gcePersistentDisk example](//kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk); note how the portion of the pod manifest example after `{spec.volumes.name}` is specified:
+```yaml
+ customVolumeMount: |
+ gcePersistentDisk:
+ pdName: my-data-disk
+ fsType: ext4
+```
+
+
+Another example is using [hostPath](//kubernetes.io/docs/concepts/storage/volumes/#hostpath):
+```yaml
+ customVolumeMount: |
+ hostPath:
+ # directory location on host
+ path: /data
+ # this field is optional
+ type: Directory
+```
+
+### Exposing the PubSub+ Software Event Broker Services
+
+#### Specifying Service Type
+
+[PubSub+ services](//docs.solace.com/Configuring-and-Managing/Default-Port-Numbers.htm#Software) can be exposed through one of the following [Kubernetes service types](//kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) by specifying the `service.type` parameter:
+
+* LoadBalancer (default) - a load balancer, typically externally accessible depending on the K8s provider.
+* NodePort - maps PubSub+ services to a port on a Kubernetes node; external access depends on access to the node.
+* ClusterIP - internal access only from within K8s.
+
+Additionally, for all above service types, external access can be configured through K8s Ingress (see next section).
+
+To support [Internal load balancers](//kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer), provider-specific service annotation may be added through defining the `service.annotations` parameter.
+
+The `service.ports` parameter defines the services exposed. It specifies the event broker `containerPort` that provides the service and the mapping to the `servicePort` where the service can be accessed when using LoadBalancer or ClusterIP. Note that there is no control over which port services are mapped when using NodePort.
+
+When using Helm to initiate a deployment, notes will be provided on the screen about how to obtain the service addresses and ports specific to your deployment - follow the "Services access" section of the notes.
+
+A deployment is ready for service requests when there is a Solace pod that is running, `1/1` ready, and the pod's label is "active=true." The exposed `pubsubplus` service will forward traffic to that active event broker node. **Important**: service means here [Guaranteed Messaging level of Quality-of-Service (QoS) of event messages persistence](//docs.solace.com/PubSub-Basics/Guaranteed-Messages.htm). Messaging traffic will not be forwarded if service level is degraded to [Direct Messages](//docs.solace.com/PubSub-Basics/Direct-Messages.htm) only.
+
+#### Using Ingress to access event broker services
+
+The `LoadBalancer` or `NodePort` service types can be used to expose all services from one PubSub+ broker. [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress) may be used to enable efficient external access from a single external IP address to specific PubSub+ services, potentially provided by multiple brokers.
+
+The following table gives an overview of how external access can be configured for PubSub+ services via Ingress.
+
+| PubSub+ service / protocol, configuration and requirements | HTTP, no TLS | HTTPS with TLS terminate at ingress | HTTPS with TLS re-encrypt at ingress | General TCP over TLS with passthrough to broker |
+| -- | -- | -- | -- | -- |
+| **Notes:** | -- | Requires TLS config on Ingress-controller | Requires TLS config on broker AND TLS config on Ingress-controller | Requires TLS config on broker. Client must use SNI to provide target host |
+| WebSockets, MQTT over WebSockets | Supported | Supported | Supported | Supported (routing via SNI) |
+| REST | Supported with restrictions: if publishing to a Queue, only root path is supported in Ingress rule or must use [rewrite target](https://github.com/kubernetes/ingress-nginx/blob/main/docs/examples/rewrite/README.md) annotation. For Topics, the initial path would make it to the topic name. | Supported, see prev. note | Supported, see prev. note | Supported (routing via SNI) |
+| SEMP | Not recommended to expose management services without TLS | Supported with restrictions: (1) Only root path is supported in Ingress rule or must use [rewrite target](https://github.com/kubernetes/ingress-nginx/blob/main/docs/examples/rewrite/README.md) annotation; (2) Non-TLS access to SEMP [must be enabled](https://docs.solace.com/Configuring-and-Managing/configure-TLS-broker-manager.htm) on broker | Supported with restrictions: only root path is supported in Ingress rule or must use [rewrite target](https://github.com/kubernetes/ingress-nginx/blob/main/docs/examples/rewrite/README.md) annotation | Supported (routing via SNI) |
+| SMF, SMF compressed, AMQP, MQTT | - | - | - | Supported (routing via SNI) |
+| SSH* | - | - | - | - |
+
+*SSH has been listed here for completeness only, external exposure not recommended.
+
+##### Configuration examples
+
+All examples assume NGINX used as ingress controller ([documented here](https://kubernetes.github.io/ingress-nginx/)), selected because NGINX is supported by most K8s providers. For [other ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/#additional-controllers) refer to their respective documentation.
+
+To deploy the NGINX Ingress Controller, refer to the [Quick start in the NGINX documentation](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start). After successful deployment get the ingress External-IP or FQDN with the following command:
+
+`kubectl get service ingress-nginx-controller --namespace=ingress-nginx`
+
+This is the IP (or the IP address the FQDN resolves to) of the ingress where external clients shall target their request and any additional DNS-resolvable hostnames, used for name-based virtual host routing, must also be configured to resolve to this IP address. If using TLS then the host certificate Common Name (CN) and/or Subject Alternative Name (SAN) must be configured to match the respective FQDN.
+
+For options to expose multiple services from potentially multiple brokers, review the [Types of Ingress from the Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#types-of-ingress).
+
+The next examples provide Ingress manifests that can be applied using `kubectl apply -f `. Then check that an external IP address (ingress controller external IP) has been assigned to the rule/service and also that the host/external IP is ready for use as it could take some time for the address to be populated.
+
+```
+kubectl get ingress
+NAME CLASS HOSTS
+ADDRESS PORTS AGE
+example.address nginx frontend.host
+20.120.69.200 80 43m
+```
+
+##### HTTP, no TLS
+
+The following example configures ingress to [access PubSub+ REST service](https://docs.solace.com/RESTMessagingPrtl/Solace-REST-Example.htm#cURL). Replace `` with the name of the service of your deployment (hint: the service name is similar to your pod names). The port name must match the `service.ports` name in the PubSub+ `values.yaml` file.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: http-plaintext-example
+spec:
+ ingressClassName: nginx
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name:
+ port:
+ name: tcp-rest
+```
+
+External requests shall be targeted to the ingress External-IP at the HTTP port (80) and the specified path.
+
+##### HTTPS with TLS terminate at ingress
+
+Additional to above, this requires specifying a target virtual DNS-resolvable host (here `https-example.foo.com`), which resolves to the ingress External-IP, and a `tls` section. The `tls` section provides the possible hosts and corresponding [TLS secret](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) that includes a private key and a certificate. The certificate must include the virtual host FQDN in its CN and/or SAN, as described above. Hint: [TLS secrets can be easily created from existing files](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets).
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: https-ingress-terminated-tls-example
+spec:
+ ingressClassName: nginx
+ tls:
+ - hosts:
+ - https-example.foo.com
+ secretName: testsecret-tls
+ rules:
+ - host: https-example.foo.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name:
+ port:
+ name: tcp-rest
+```
+
+External requests shall be targeted to the ingress External-IP through the defined hostname (here `https-example.foo.com`) at the TLS port (443) and the specified path.
+
+
+##### HTTPS with TLS re-encrypt at ingress
+
+This only differs from above in that the request is forwarded to a TLS-encrypted PubSub+ service port. The broker must have TLS configured but there are no specific requirements for the broker certificate as the ingress does not enforce it.
+
+The difference in the Ingress manifest is an NGINX-specific annotation marking that the backend is using TLS, and the service target port in the last line - it refers now to a TLS backend port:
+
+```yaml
+metadata:
+ :
+ annotations:
+ nginx.ingress.kubernetes.io/backend-protocol: HTTPS
+ :
+spec:
+ :
+ rules:
+ :
+ port:
+ name: tls-rest
+```
+
+##### General TCP over TLS with passthrough to broker
+
+In this case the ingress does not terminate TLS, only provides routing to the broker Pod based on the hostname provided in the SNI extension of the Client Hello at TLS connection setup. Since it will pass through TLS traffic directly to the broker as opaque data, this enables the use of ingress for any TCP-based protocol using TLS as transport.
+
+The TLS passthrough capability must be explicitly enabled on the NGINX ingress controller, as it is off by default. This can be done by editing the `ingress-nginx-controller` "Deployment" in the `ingress-nginx` namespace.
+1. Open the controller for editing: `kubectl edit deployment ingress-nginx-controller --namespace ingress-nginx`
+2. Search where the `nginx-ingress-controller` arguments are provided, insert `--enable-ssl-passthrough` into the list and save. For more information refer to the [NGINX User Guide](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough). Also note the potential performance impact of using SSL Passthrough mentioned there.
+
+The Ingress manifest specifies "passthrough" by adding the `nginx.ingress.kubernetes.io/ssl-passthrough: "true"` annotation.
+
+The deployed PubSub+ broker(s) must have TLS configured with a certificate that includes DNS names in CN and/or SAN, that match the host used. In the example the broker server certificate may specify the host `*.broker1.bar.com`, so multiple services can be exposed from `broker1`, distinguished by the host FQDN.
+
+The protocol client must support SNI. It depends on the client if it uses the server certificate CN or SAN for host name validation. Most recent clients use SAN, for example the PubSub+ Java API requires host DNS names in the SAN when using SNI.
+
+With the above, an ingress example looks like the following:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: ingress-passthrough-tls-example
+ annotations:
+ nginx.ingress.kubernetes.io/ssl-passthrough: "true"
+spec:
+ ingressClassName: nginx
+ rules:
+ - host: smf.broker1.bar.com
+ http:
+ paths:
+ - backend:
+ service:
+ name:
+ port:
+ name: tls-smf
+ path: /
+ pathType: ImplementationSpecific
+```
+External requests shall be targeted to the ingress External-IP through the defined hostname (here `smf.broker1.bar.com`) at the TLS port (443) with no path required.
+
+#### Using pod label "active" to identify the active event broker node
+
+This section provides more information about what is required to achieve the correct label for the pod hosting the active event broker node.
+
+Use `kubectl get pods --show-labels` to check for the status of the "active" label. In a stable deployment, one of the message routing nodes with ordinal 0 or 1 shall have the label `active=true`. You can find out if there is an issue by [checking events](#viewing-events) for related ERROR reported.
+
+This label is set by the `readiness_check.sh` script in `pubsubplus/templates/solaceConfigMap.yaml`, triggered by the StatefulSet's readiness probe. For this to happen the following are required:
+
+- the Solace pods must be able to communicate with each other at port 8080 and internal ports using the Service-Discovery service.
+- the Kubernetes service account associated with the Solace pod must have sufficient rights to patch the pod's label when the active event broker is service ready
+- the Solace pods must be able to communicate with the Kubernetes API at `kubernetes.default.svc.cluster.local` at port $KUBERNETES_SERVICE_PORT. You can find out the address and port by [SSH into the pod](#ssh-access-to-individual-message-brokers).
+
+### Enabling use of TLS to access broker services
+
+#### Setting up TLS
+
+Default deployment does not have TLS over TCP enabled to access broker services. Although the exposed `service.ports` include ports for secured TCP, only the insecure ports can be used by default.
+
+To enable accessing services over TLS a server key and certificate must be configured on the broker.
+
+It is assumed that a provider out of scope of this document will be used to create a server key and certificate for the event broker, that meet the [requirements described in the Solace Documentation](https://docs.solace.com/Configuring-and-Managing/Managing-Server-Certs.htm). If the server key is password protected it shall be transformed to an unencrypted key, e.g.: `openssl rsa -in encryptedprivate.key -out unencrypted.key`.
+
+The server key and certificate must be packaged in a Kubernetes secret, for example by [creating a TLS secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets). Example:
+```
+kubectl create secret tls --key="" --cert=""
+```
+
+This secret name and related parameters shall be specified when deploying the PubSub+ Helm chart:
+```
+tls:
+ enabled: true # set to false by default
+ serverCertificatesSecret: # replace by the actual name
+ certFilename: # optional, default if not provided: tls.crt
+ certKeyFilename: # optional, default if not provided: tls.key
+```
+
+> Note: ensure filenames are matching the files reported from running `kubectl describe secret `.
+
+Here is an example new deployment with TLS enabled using default `certFilename` and `certKeyFilename`:
+```
+helm install my-release solacecharts/pubsubplus \
+--set tls.enabled=true,tls.serverCertificatesSecret=
+```
+
+Important: it is not possible to enable TLS on an existing deployment that has been created without TLS enabled by simply using the [modify deployment](#modifying-or-upgrading-a-deployment) procedure. In this case, for the first time, certificates need to be [manually loaded and set up](//docs.solace.com/Configuring-and-Managing/Managing-Server-Certs.htm) on each broker node. After that it is possible to use `helm upgrade` with a secret specified.
+
+#### Rotating the server key
+
+In the event the server key or certificate needs to be rotated, a new Kubernetes secret must be created, which may require deleting and recreating the old secret if using the same name.
+
+Next, if using the same secret name, the broker Pods need to be restarted, one at a time waiting to reach `1/1` availability before continuing on the next one: starting with the Monitor (ordinal -2), followed by the node in backup role with `active=false` label, and finally the third node. If using a new secret name, the [modify deployment](#modifying-or-upgrading-a-deployment) procedure can be used and an automatic rolling update will follow these steps restarting the nodes one at a time.
+
+> Note: a pod restart will result in provisioning the server certificate from the secret again so it will revert back from any other server certificate that may have been provisioned on the broker through other mechanism.
+
+### The PubSub+ Software Event Broker Docker image
+
+The `image.repository` and `image.tag` parameters combined specify the PubSub+ Software Event Broker Docker image to be used for the deployment. They can either point to an image in a public or a private Docker container registry.
+
+#### Using a public registry
+
+The default values are `solace/solace-pubsub-standard/` and `latest`, which is the free PubSub+ Software Event Broker Standard Edition from the [public Solace Docker Hub repo](//hub.docker.com/r/solace/solace-pubsub-standard/). It is generally recommended to set `image.tag` to a specific build for traceability purposes.
+
+#### Using private registries
+
+The following steps are applicable if using a private Docker container registry (e.g.: GCR, ECR or Harbor):
+1. Get the Solace PubSub+ event broker Docker image tar.gz archive
+2. Load the image into the private Docker registry
+
+To get the PubSub+ Software Event Broker Docker image URL, go to the Solace Developer Portal and download the Solace PubSub+ Software Event Broker as a **docker** image or obtain your version from Solace Support.
+
+| PubSub+ Software Event Broker Standard
Docker Image | PubSub+ Software Event Broker Enterprise Evaluation Edition
Docker Image
+| :---: | :---: |
+| Free, up to 1k simultaneous connections,
up to 10k messages per second | 90-day trial version, unlimited |
+| [Download Standard docker image](http://dev.solace.com/downloads/ ) | [Download Evaluation docker image](http://dev.solace.com/downloads#eval ) |
+
+To load the Solace PubSub+ Software Event Broker Docker image into a private Docker registry, follow the general steps below; for specifics, consult the documentation of the registry you are using.
+
+* Prerequisite: local installation of [Docker](//docs.docker.com/get-started/ ) is required
+* Login to the private registry:
+```sh
+sudo docker login ...
+```
+* First, load the image to the local docker registry:
+```sh
+# Options a or b depending on your Docker image source:
+## Option a): If you have a local tar.gz Docker image file
+sudo docker load -i .tar.gz
+## Option b): You can use the public Solace Docker image, such as from Docker Hub
+sudo docker pull solace/solace-pubsub-standard:latest # or specific
+#
+# Verify the image has been loaded and note the associated "IMAGE ID"
+sudo docker images
+```
+* Tag the image with a name specific to the private registry and tag:
+```sh
+sudo docker tag //:
+```
+* Push the image to the private registry
+```sh
+sudo docker push //:
+```
+
+Note that additional steps may be required if using signed images.
+
+#### Using ImagePullSecrets for signed images
+
+An additional ImagePullSecret may be required if using signed images from a private Docker registry, e.g.: Harbor.
+
+Here is an example of creating an ImagePullSecret. Refer to your registry's documentation for the specific details of use.
+
+```sh
+kubectl create secret docker-registry --dockerserver= \
+ --docker-username= --docker-password= \
+ --docker-email=
+```
+
+Then set the `image.pullSecretName` chart value to ``.
+
+### Security considerations
+
+#### Using Security Context
+
+The event broker container already runs in non-privileged mode.
+
+If `securityContext.enabled` is `true` (default) then the `securityContext.fsGroup` and `securityContext.runAsUser` settings define [the pod security context](//kubernetes.io/docs/tasks/configure-pod-container/security-context/).
+
+If other settings control `fsGroup` and `runAsUser`, e.g: when using a [PodSecurityPolicy](//kubernetes.io/docs/concepts/policy/pod-security-policy/) or an Openshift "restricted" SCC, `securityContext.enabled` shall be set to `false` or ensure specified values do not conflict with the policy settings.
+
+#### Enabling pod label "active" in a tight security environment
+
+Services require [pod label "active"](#using-pod-label-active-to-identify-the-active-event-broker-node) of the serving event broker.
+* In a controlled environment it may be necessary to add a [NetworkPolicy](//kubernetes.io/docs/concepts/services-networking/network-policies/ ) to enable [required communication](#using-pod-label-active-to-identify-the-active-event-broker-node).
+
+#### Securing TLS server key and certificate
+
+Using secrets for TLS server keys and certificates follows Kubernetes recommendations, however, particularly in a production environment, additional steps are required to ensure only authorized access to these secrets following Kubernetes industry best practices, including setting tight RBAC permissions and fixing possible security holes.
+
+## Deployment Prerequisites
+
+### Platform and tools setup
+
+#### Install the `kubectl` command-line tool
+
+Refer to [these instructions](//kubernetes.io/docs/tasks/tools/install-kubectl/) to install `kubectl` if your environment does not already provide this tool or equivalent (like `oc` in OpenShift).
+
+#### Perform any necessary Kubernetes platform-specific setup
+
+This refers to getting your platform ready either by creating a new one or getting access to an existing one. Supported platforms include but are not restricted to:
+* Amazon EKS
+* Azure AKS
+* Google GCP
+* OpenShift
+* MiniKube
+* VMWare PKS
+
+Check your platform running the `kubectl get nodes` command from your command-line client.
+
+#### Install and setup the Helm package manager
+
+The event broker can be deployed using Helm v3.
+> Note: For Helm v2 support refer to [earlier versions of this quickstart](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/releases).
+
+The Helm v3 executable is available from https://github.com/helm/helm/releases . Further documentation is available from https://helm.sh/.
+
+```shell
+curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+```
+
+## Deployment steps
+
+As discussed in the [Overview](#overview), two types of deployments will be described:
+* Deployment steps using Helm, as package manager
+* Alternative Deployment with generating templates for the Kubernetes `kubectl` tool
+
+### Deployment steps using Helm
+
+The recommended way is to make use of published pre-packaged PubSub+ charts from Solace' public repo and customizing your deployment through [available chart parameters](/pubsubplus/README.md).
+
+Add or refresh a local Solace `solacecharts` repo:
+```bash
+# Add new "solacecharts" repo
+helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
+# Refresh if needed, e.g.: to use a recently published chart version
+helm repo update solacecharts
+
+# Install from the repo
+helm install my-release solacecharts/pubsubplus
+```
+
+There are three Helm chart variants available with default small-size configurations:
+1. `pubsubplus-dev` - PubSub+ Software Event Broker for Developers (standalone)
+2. `pubsubplus` - PubSub+ Software Event Broker standalone, supporting 100 connections
+3. `pubsubplus-ha` - PubSub+ Software Event Broker HA, supporting 100 connections
+
+Customization options are described in the [PubSub+ Software Event Broker Helm Chart](/pubsubplus/README.md#configuration) reference.
+
+Also, refer to the [quick start guide](/README.md) for additional deployment details.
+
+**More customization options**
+
+If more customization than just using Helm parameters is required, you can create your own fork so templates can be edited:
+```bash
+# This creates a local directory from the published templates
+helm fetch solacecharts/pubsubplus --untar
+# Use the Helm chart from this directory
+helm install ./pubsubplus
+```
+> Note: it is encouraged to raise a [GitHub issue](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/issues/new) to possibly contribute your enhancements back to the project.
+
+### Alternative Deployment with generating templates for the Kubernetes `kubectl` tool
+
+This method will first generate installable Kubernetes templates from this project's Helm charts, then the templates can be installed using the Kubectl tool.
+
+Note that later sections of this document about modifying, upgrading or deleting a Deployment using the Helm tool do not apply.
+
+**Step 1: Generate Kubernetes templates for Solace event broker deployment**
+
+1) Ensure Helm is locally installed.
+
+2) Add or refresh a local Solace `solacecharts` repo:
+```bash
+# Add new "solacecharts" repo
+helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
+# Refresh if needed, e.g.: to use a recently published chart version
+helm repo update solacecharts
+```
+
+3) Generate the templates:
+
+First, consider if any [configurations](/pubsubplus/README.md#configuration) are required.
+If this is the case then you can add overrides as additional `--set ...` parameters to the `helm template` command, or use an override YAML file.
+
+```sh
+# Create local copy
+helm fetch solacecharts/pubsubplus --untar
+# Create location for the generated templates
+mkdir generated-templates
+# In one of next sample commands replace my-release to the desired release name
+# a) Using all defaults:
+helm template my-release --output-dir ./generated-templates ./pubsubplus
+# b) Example with configuration using --set
+helm template my-release --output-dir ./generated-templates \
+ --set solace.redundancy=true \
+ ./pubsubplus
+# c) Example with configuration using --set
+helm template my-release --output-dir ./generated-templates \
+ -f my-values.yaml \
+ ./pubsubplus
+
+```
+The generated set of templates are now available in the `generated-templates` directory.
+
+**Step 2: Deploy the templates on the target system**
+
+Assumptions: `kubectl` is deployed and configured to point to your Kubernetes cluster
+
+1) Optionally, copy the `generated-templates` directory with contents if this is on a different host
+
+2) Initiate the deployment:
+```bash
+kubectl apply --recursive -f ./generated-templates/pubsubplus
+```
+Wait for the deployment to complete, which is then ready to use.
+
+3) To delete the deployment, execute:
+```bash
+kubectl delete --recursive -f ./generated-templates/pubsubplus
+```
+
+
+
+## Validating the Deployment
+
+Now you can validate your deployment on the command line. In this example an HA configuration is deployed with pod/XXX-XXX-pubsubplus-0 being the active event broker/pod. The notation XXX-XXX is used for the unique release name, e.g: "my-release".
+
+```sh
+prompt:~$ kubectl get statefulsets,services,pods,pvc,pv
+NAME READY AGE
+statefulset.apps/my-release-pubsubplus 3/3 13m
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/kubernetes ClusterIP 10.92.0.1 443/TCP 14d
+service/my-release-pubsubplus LoadBalancer 10.92.13.40 34.67.66.30 2222:30197/TCP,8080:30343/TCP,1943:32551/TCP,55555:30826/TCP,55003:30770/TCP,55443:32583/TCP,8008:32689/TCP,1443:32460/TCP,5672:31960/TCP,1883:32112/TCP,9000:30848/TCP 13m
+service/my-release-pubsubplus-discovery ClusterIP None 8080/TCP,8741/TCP,8300/TCP,8301/TCP,8302/TCP 13m
+
+NAME READY STATUS RESTARTS AGE
+pod/my-release-pubsubplus-0 1/1 Running 0 13m
+pod/my-release-pubsubplus-1 1/1 Running 0 13m
+pod/my-release-pubsubplus-2 1/1 Running 0 13m
+
+NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+persistentvolumeclaim/data-my-release-pubsubplus-0 Bound pvc-6b0cd358-30c4-11ea-9379-42010a8000c7 30Gi RWO standard 13m
+persistentvolumeclaim/data-my-release-pubsubplus-1 Bound pvc-6b14bc8a-30c4-11ea-9379-42010a8000c7 30Gi RWO standard 13m
+persistentvolumeclaim/data-my-release-pubsubplus-2 Bound pvc-6b24b2aa-30c4-11ea-9379-42010a8000c7 30Gi RWO standard 13m
+
+NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+persistentvolume/pvc-6b0cd358-30c4-11ea-9379-42010a8000c7 30Gi RWO Delete Bound default/data-my-release-pubsubplus-0 standard 13m
+persistentvolume/pvc-6b14bc8a-30c4-11ea-9379-42010a8000c7 30Gi RWO Delete Bound default/data-my-release-pubsubplus-1 standard 13m
+persistentvolume/pvc-6b24b2aa-30c4-11ea-9379-42010a8000c7 30Gi RWO Delete Bound default/data-my-release-pubsubplus-2 standard 13m
+
+
+prompt:~$ kubectl describe service my-release-pubsubplus
+Name: my-release-pubsubplus
+Namespace: test
+Labels: app.kubernetes.io/instance=my-release
+ app.kubernetes.io/managed-by=Tiller
+ app.kubernetes.io/name=pubsubplus
+ helm.sh/chart=pubsubplus-1.0.0
+Annotations:
+Selector: active=true,app.kubernetes.io/instance=my-release,app.kubernetes.io/name=pubsubplus
+Type: LoadBalancer
+IP: 10.100.200.41
+LoadBalancer Ingress: 34.67.66.30
+Port: ssh 2222/TCP
+TargetPort: 2222/TCP
+NodePort: ssh 30197/TCP
+Endpoints: 10.28.1.20:2222
+:
+:
+```
+
+Generally, all services including management and messaging are accessible through a Load Balancer. In the above example `34.67.66.30` is the Load Balancer's external Public IP to use.
+
+> Note: When using MiniKube, there is no integrated Load Balancer. For a workaround, execute `minikube service XXX-XXX-solace` to expose the services. Services will be accessible directly using mapped ports instead of direct port access, for which the mapping can be obtained from `kubectl describe service XXX-XXX-solace`.
+
+### Gaining admin access to the event broker
+
+There are [multiple management tools](//docs.solace.com/Management-Tools.htm ) available. The WebUI is the recommended simplest way to administer the event broker for common tasks.
+
+#### Admin Password
+
+A random admin password will be generated if it has not been provided at deployment using the `solace.usernameAdminPassword` parameter; refer to the information from `helm status` on how to retrieve it.
+
+**Important:** Every time `helm install` or `helm upgrade` is called a new admin password will be generated, which may break an existing deployment. Therefore ensure to always provide the password from the initial deployment as `solace.usernameAdminPassword=` parameter to subsequent `install` and `upgrade` commands.
+
+#### WebUI, SolAdmin and SEMP access
+
+Use the Load Balancer's external Public IP at port 8080 to access these services.
+
+#### Solace CLI access
+
+If you are using a single event broker and are used to working with a CLI event broker console access, you can SSH into the event broker as the `admin` user using the Load Balancer's external Public IP:
+
+```sh
+
+$ssh -p 2222 admin@35.202.131.158
+Solace PubSub+ Standard
+Password:
+
+Solace PubSub+ Standard Version 9.4.0.105
+
+The Solace PubSub+ Standard is proprietary software of
+Solace Corporation. By accessing the Solace PubSub+ Standard
+you are agreeing to the license terms and conditions located at
+//www.solace.com/license-software
+
+Copyright 2004-2019 Solace Corporation. All rights reserved.
+
+To purchase product support, please contact Solace at:
+//dev.solace.com/contact-us/
+
+Operating Mode: Message Routing Node
+
+XXX-XXX-pubsubplus-0>
+```
+
+If you are using an HA deployment, it is better to access the CLI through the Kubernetes pod and not directly via SSH.
+
+* Loopback to SSH directly on the pod
+
+```sh
+kubectl exec -it XXX-XXX-pubsubplus-0 -- bash -c "ssh -p 2222 admin@localhost"
+```
+
+* Loopback to SSH on your host with a port-forward map
+
+```sh
+kubectl port-forward XXX-XXX-pubsubplus-0 62222:2222 &
+ssh -p 62222 admin@localhost
+```
+
+This can also be mapped to individual event brokers in the deployment via port-forward:
+
+```
+kubectl port-forward XXX-XXX-pubsubplus-0 8081:8080 &
+kubectl port-forward XXX-XXX-pubsubplus-1 8082:8080 &
+kubectl port-forward XXX-XXX-pubsubplus-2 8083:8080 &
+```
+
+#### SSH access to individual event brokers
+
+For direct access, use:
+
+```sh
+kubectl exec -it XXX-XXX-pubsubplus-<pod-ordinal> -- bash
+```
+
+### Testing data access to the event broker
+
+To test data traffic through the newly created event broker instance, visit the Solace Developer Portal [APIs & Protocols](//www.solace.dev/ ). Under each option there is a Publish/Subscribe tutorial that will help you get started and provide the specific default port to use.
+
+Use the external Public IP to access the deployment. If a port required for a protocol is not opened, refer to the [Modification example](#modification-example) for how to open it up.
+
+## Troubleshooting
+
+### General Kubernetes troubleshooting hints
+https://kubernetes.io/docs/tasks/debug-application-cluster/debug-application/
+
+### Checking the reason for failed resources
+
+Run `kubectl get statefulsets,services,pods,pvc,pv` to get an understanding of the state, then drill down to get more information on a failed resource to reveal possible Kubernetes resourcing issues, e.g.:
+```sh
+kubectl describe pvc
+```
+
+### Viewing logs
+
+Detailed logs from the currently running container in a pod:
+```sh
+kubectl logs XXX-XXX-pubsubplus-0 -f # use -f to follow live
+```
+
+It is also possible to get the logs from a previously terminated or failed container:
+```sh
+kubectl logs XXX-XXX-pubsubplus-0 -p
+```
+
+Filtering on bringup logs (helps with initial troubleshooting):
+```sh
+kubectl logs XXX-XXX-pubsubplus-0 | grep [.]sh
+```
+
+### Viewing events
+
+Kubernetes collects [all events for a cluster in one pool](//kubernetes.io/docs/tasks/debug-application-cluster/events-stackdriver ). This includes events related to the PubSub+ deployment.
+
+It is recommended to watch events when creating or upgrading a Solace deployment. Events clear after about an hour. You can query all available events:
+
+```sh
+kubectl get events -w # use -w to watch live
+```
+
+### PubSub+ Software Event Broker troubleshooting
+
+#### Pods stuck in not enough resources
+
+If pods stay in pending state and `kubectl describe pods` reveals there are not enough memory or CPU resources, check the [resource requirements of the targeted scaling tier](#cpu-and-memory-requirements) of your deployment and ensure adequate node resources are available.
+
+#### Pods stuck in no storage
+
+Pods may also stay in pending state because [storage requirements](#storage) cannot be met. Check `kubectl get pv,pvc`. PVCs and PVs should be in bound state and if not then use `kubectl describe pvc` for any issues.
+
+Unless otherwise specified, a default storage class must be available for default PubSub+ deployment configuration.
+```bash
+kubectl get storageclasses
+```
+
+#### Pods stuck in CrashLoopBackoff, Failed or Not Ready
+
+Pods stuck in CrashLoopBackoff, or Failed, or Running but not Ready "active" state, usually indicate an issue with available Kubernetes node resources or with the container OS or the event broker process start.
+
+* Try to understand the reason following earlier hints in this section.
+* Try to recreate the issue by deleting and then reinstalling the deployment - ensure to remove related PVCs if applicable as they would mount volumes with existing, possibly outdated or incompatible database - and watch the [logs](#viewing-logs) and [events](#viewing-events) from the beginning. Look for ERROR messages preceded by information that may reveal the issue.
+
+#### No Pods listed
+
+If no pods are listed related to your deployment check the StatefulSet for any clues:
+```
+kubectl describe statefulset my-release-pubsubplus
+```
+
+#### Security constraints
+
+Your Kubernetes environment's security constraints may also impact successful deployment. Review the [Security considerations](#security-considerations) section.
+
+## Modifying or upgrading a Deployment
+
+Use the `helm upgrade` command to upgrade/modify the event broker deployment: request the required modifications to the chart by passing the new/changed parameters or by providing an upgrade YAML file. When chaining multiple `-f <values-file>` arguments to Helm, override priority will be given to the last (right-most) file specified.
+
+For both version upgrade and modifications, the "RollingUpdate" strategy of the Kubernetes StatefulSet applies: pods in the StatefulSet are restarted with new values in reverse order of ordinals, which means for PubSubPlus first the monitoring node (ordinal 2), then backup (ordinal 1) and finally the primary node (ordinal 0).
+
+For the next examples, assume a deployment has been created with some initial overrides for a development HA cluster:
+```bash
+helm install my-release solacecharts/pubsubplus --set solace.size=dev,solace.redundancy=true
+```
+
+#### Getting the currently used parameter values
+
+Currently used parameter values are the default chart parameter values overlayed with value-overrides.
+
+To get the default chart parameter values, check `helm show values solacecharts/pubsubplus`.
+
+To get the current value-overrides, execute:
+```
+$ helm get values my-release
+USER-SUPPLIED VALUES:
+solace:
+ redundancy: true
+ size: dev
+```
+**Important:** this may not be shown here, but be aware of an additional non-default parameter:
+```
+solace:
+ usernameAdminPassword: jMzKoW39zz # The value is just an example
+```
+This has been generated at the initial deployment if not specified and must be used henceforth for all change requests, to keep it the same. See related note in the [Admin Password section](#admin-password).
+
+#### Upgrade example
+
+To **upgrade** the version of the event broker running within a Kubernetes cluster:
+
+- Add the new version of the event broker to your container registry, then
+- Either:
+ * Set the new image in the Helm upgrade command, also ensure to include the original overrides:
+```bash
+helm upgrade my-release solacecharts/pubsubplus \
+  --set solace.size=dev,solace.redundancy=true,solace.usernameAdminPassword=jMzKoW39zz \
+ --set image.repository=//solace-pubsub-standard,image.tag=NEW.VERSION.XXXXX,image.pullPolicy=IfNotPresent
+```
+ * Or create a simple `version-upgrade.yaml` file and use that to upgrade the release:
+```bash
+tee ./version-upgrade.yaml <<-EOF # include original and new overrides
+solace:
+ redundancy: true
+ size: dev
+ usernameAdminPassword: jMzKoW39zz
+image:
+ repository: //solace-pubsub-standard
+ tag: NEW.VERSION.XXXXX
+ pullPolicy: IfNotPresent
+EOF
+helm upgrade my-release solacecharts/pubsubplus -f version-upgrade.yaml
+```
+> Note: upgrade will begin immediately, in the order of pod 2, 1 and 0 (Monitor, Backup, Primary) taken down for upgrade in an HA deployment. This will affect running event broker instances, result in potentially multiple failovers and requires connection-retries configured in the client.
+
+#### Modification example
+
+Similarly, to **modify** deployment parameters, you need to pass modified value-overrides. Passing the same value-overrides to upgrade will result in no change.
+
+In this example we will add the AMQP encrypted (TLS) port to the load balancer - it is not included by default.
+
+First [look up](//docs.solace.com/Configuring-and-Managing/Default-Port-Numbers.htm#Software) the port number for AMQP TLS: the required port is 5671.
+
+Next, create an update file with the additional contents:
+```bash
+tee ./port-update.yaml <<-EOF # :
+service:
+ ports:
+ - servicePort: 5671
+ containerPort: 5671
+ protocol: TCP
+ name: amqptls
+EOF
+```
+
+Now upgrade the deployment, passing the changes. This time the original `--set` value-overrides are combined with the override file:
+```bash
+helm upgrade my-release solacecharts/pubsubplus \
+  --set solace.size=dev,solace.redundancy=true,solace.usernameAdminPassword=jMzKoW39zz \
+ --values port-update.yaml
+```
+
+## Re-installing a Deployment
+
+If using *persistent* storage, broker data will not be deleted upon `helm delete`.
+
+In this case the deployment can be reinstalled and continue from the point before the `helm delete` command was executed by running `helm install` again, using the **same** release name and parameters as the previous run. This includes explicitly providing the same admin password as before.
+
+```
+# Initial deployment:
+helm install my-release solacecharts/pubsubplus --set solace.size=dev,solace.redundancy=true
+# This will auto-generate an admin password
+# Retrieve the admin password, follow instructions from the output of "helm status", section Admin credentials
+# Delete this deployment
+helm delete my-release
+# Reinstall deployment, assuming persistent storage. Notice the admin password specified
+helm install my-release solacecharts/pubsubplus --set solace.size=dev,solace.redundancy=true,solace.usernameAdminPassword=jMzKoW39zz
+# Original deployment is now back up
+```
+
+## Deleting a Deployment
+
+Use Helm to delete a deployment, also called a release:
+```
+helm delete my-release
+```
+
+Check what has remained from the deployment:
+```
+kubectl get statefulsets,services,pods,pvc,pv
+```
+
+> Note: Helm will not clean up PVCs and related PVs. Use `kubectl delete` to delete PVCs if associated data is no longer required.
+
+
+
+
+
+
+
+
diff --git a/docs/helm-charts/create-chart-variants.sh b/docs/helm-charts/create-chart-variants.sh
index 547340bb..e61fcb94 100644
--- a/docs/helm-charts/create-chart-variants.sh
+++ b/docs/helm-charts/create-chart-variants.sh
@@ -53,5 +53,6 @@ for variant in '' '-dev' '-ha' ;
sed -i 's%helm repo add.*%helm repo add openshift-helm-charts https://charts.openshift.io%g' pubsubplus-openshift"$variant"/README.md
sed -i 's%solacecharts/pubsubplus%openshift-helm-charts/pubsubplus-openshift%g' pubsubplus-openshift"$variant"/README.md
sed -i 's@`solace/solace-pubsub-standard`@`registry.connect.redhat.com/solace/pubsubplus-standard`@g' pubsubplus-openshift"$variant"/README.md
+ sed -i 's/kubectl/oc/g' pubsubplus-openshift"$variant"/templates/NOTES.txt
helm package pubsubplus-openshift"$variant"
done
diff --git a/pubsubplus/.helmignore b/pubsubplus/.helmignore
index f0c13194..daebc7da 100644
--- a/pubsubplus/.helmignore
+++ b/pubsubplus/.helmignore
@@ -1,21 +1,21 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/pubsubplus/Chart.yaml b/pubsubplus/Chart.yaml
index b8c1dae5..16b4d42c 100644
--- a/pubsubplus/Chart.yaml
+++ b/pubsubplus/Chart.yaml
@@ -1,29 +1,29 @@
-apiVersion: v2
-description: Deploy Solace PubSub+ Event Broker Singleton or HA redundancy group onto a Kubernetes Cluster
-name: pubsubplus
-version: 3.0.0
-icon: https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/images/PubSubPlus.png
-kubeVersion: '>= 1.10.0-0'
-maintainers:
- - name: Solace Community Forum
- url: https://solace.community/
- - name: Solace Support
- url: https://solace.com/support/
-home: https://dev.solace.com
-sources:
- - https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart
-keywords:
-- solace
-- pubsubplus
-- pubsub+
-- pubsub
-- messaging
-- advanced event broker
-- event broker
-- event mesh
-- event streaming
-- data streaming
-- event integration
-- middleware
-annotations:
- charts.openshift.io/name: PubSub+ Event Broker
+apiVersion: v2
+description: Deploy Solace PubSub+ Event Broker Singleton or HA redundancy group onto a Kubernetes Cluster
+name: pubsubplus
+version: 3.1.0
+icon: https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/images/PubSubPlus.png
+kubeVersion: '>= 1.10.0-0'
+maintainers:
+ - name: Solace Community Forum
+ url: https://solace.community/
+ - name: Solace Support
+ url: https://solace.com/support/
+home: https://dev.solace.com
+sources:
+ - https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart
+keywords:
+- solace
+- pubsubplus
+- pubsub+
+- pubsub
+- messaging
+- advanced event broker
+- event broker
+- event mesh
+- event streaming
+- data streaming
+- event integration
+- middleware
+annotations:
+ charts.openshift.io/name: PubSub+ Event Broker
diff --git a/pubsubplus/LICENSE b/pubsubplus/LICENSE
index 8dada3ed..c0ee8129 100644
--- a/pubsubplus/LICENSE
+++ b/pubsubplus/LICENSE
@@ -1,201 +1,201 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/pubsubplus/README.md b/pubsubplus/README.md
index f5bd9741..6875d4eb 100644
--- a/pubsubplus/README.md
+++ b/pubsubplus/README.md
@@ -1,113 +1,117 @@
-# Solace PubSub+ Software Event Broker - Helm Chart
-
-The [Solace PubSub+ Platform](https://solace.com/products/platform/)'s [software event broker](https://solace.com/products/event-broker/software/) efficiently streams event-driven information between applications, IoT devices and user interfaces running in cloud, on-premises, and hybrid environments using open APIs and protocols like AMQP, JMS, MQTT, REST and WebSocket. It can be installed into a variety of public and private clouds, PaaS, and on-premises environments, and brokers in multiple locations can be linked together in an [event mesh](https://solace.com/what-is-an-event-mesh/) to dynamically share events across the distributed enterprise.
-
-## Overview
-
-This chart bootstraps a single-node or HA deployment of a [Solace PubSub+ Software Event Broker](//solace.com/products/event-broker/software/) on a [Kubernetes](//kubernetes.io) cluster using the [Helm](//helm.sh) package manager.
-
-Detailed documentation is provided in the [Solace PubSub+ Software Event Broker on Kubernetes Documentation](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md).
-
-## Prerequisites
-
-* Kubernetes 1.10 or later platform with adequate [CPU and memory](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#cpu-and-memory-requirements) and [storage resources](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#disk-storage) for the targeted scaling tier requirements
-* Helm package manager v3 client installed
-* If using a private container image registry, load the PubSub+ Software Event Broker container image and for signed images create an image pull secret
-* With persistent storage enabled (see in [Configuration](#config-storageclass)):
- * Specify a storage class unless using a default storage class in your Kubernetes cluster
-
-Also review additional [deployment considerations](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#pubsub-software-event-broker-deployment-considerations).
-
-## Create a deployment
-
-```bash
-helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
-helm install my-release solacecharts/pubsubplus
-```
-
-> Note: the release name is not recommended to exceed 28 characters
-
-## Use a deployment
-
-Obtain information about the deployment and services:
-
-```bash
-helm status my-release
-```
-
-Refer to the detailed PubSub+ Kubernetes documentation for:
-* [Validating the deployment](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#validating-the-deployment); or
-* [Troubleshooting](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#troubleshooting)
-* [Modifying or Upgrading](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#modifying-or-upgrading-a-deployment)
-
-## Delete a deployment
-
-```bash
-helm delete my-release
-kubectl get pvc | grep data-my-release
-# Delete any PVCs related to my-release
-```
-**Important:** Ensure to delete existing PVCs if reusing the same deployment name for a clean new deployment.
-
-## Configuration
-
-The following table lists the configurable parameters of the PubSub+ chart and their default values. For a detailed discussion refer to the [Deployment Considerations](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md##pubsub-helm-chart-deployment-considerations) in the PubSub+ Kubernetes documentation.
-
-There are several ways to customize the deployment:
-
-- Override default values using the `--set key=value[,key=value]` argument to `helm install`. For example,
-```bash
-helm install my-release \
- --set solace.redundancy=true,solace.usernameAdminPassword=secretpassword \
- solacecharts/pubsubplus
-```
-
-- Another option is to create a YAML file containing the values to override and pass that to Helm:
-```bash
-# Create file
-echo "# Overrides:
-solace:
- redundancy: true
- usernameAdminPassword: secretpassword" > my-values.yaml
-# Now use the file:
-helm install --name my-release -f my-values.yaml solacecharts/pubsubplus
-```
-> Note: as an alternative to creating a new file you can [download](https://raw.githubusercontent.com/SolaceProducts/pubsubplus-kubernetes-quickstart/master/pubsubplus/values.yaml) the `values.yaml` file with default values and edit that for overrides.
-
-For more ways to override default chart values, refer to [Customizing the Helm Chart Before Installing](//helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
-
-| Parameter | Description | Default |
-| ------------------------------ | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- |
-| `nameOverride` | Kubernetes objects will be named as `-nameOverride` | Undefined, default naming is `-` |
-| `fullnameOverride` | Kubernetes objects will be named as `fullnameOverride` | Undefined, default naming is `-` |
-| `solace.redundancy` | `false` will create a single-node non-HA deployment; `true` will create an HA deployment with Primary, Backup and Monitor nodes | `false` |
-| `solace.size` | Event broker connection scaling. Options: `dev` (requires minimum resources but no guaranteed performance), `prod100`, `prod1k`, `prod10k`, `prod100k`, `prod200k` | `prod100` |
-| `solace.usernameAdminPassword` | The password for the "admin" management user. Will autogenerate it if not provided. **Important:** refer to the the information from `helm status` how to retrieve it and use it for `helm upgrade`. | Undefined, meaning autogenerate |
-| `solace.timezone` | Timezone setting for the PubSub+ container. Valid values are tz database time zone names. | Undefined, default is UTC |
-| `solace.extraEnvVars` | List of extra environment variables to be added to the PubSub+ container. A primary use case is to specify [configuration keys](https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/Docker-Tasks/Config-SW-Broker-Container-Cfg-Keys.htm). Important: env variables defined here will not override the ones defined in solaceConfigMap. | Undefined |
-| `solace.extraEnvVarsCM` | The name of an existing ConfigMap containing extra environment variables | Undefined |
-| `solace.extraEnvVarsSecret` | The name of an existing Secret containing extra environment variables (in case of sensitive data) | Undefined |
-| `image.repository` | The image repo name and path to the PubSub+ container image | `solace/solace-pubsub-standard` |
-| `image.tag` | The Solace container image tag. It is recommended to specify an explicit tag for production use For possible tags, refer to the [Solace Docker Hub repo](https://hub.docker.com/r/solace/solace-pubsub-standard/tags) | `latest` |
-| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
-| `image.pullSecretName` | Name of the ImagePullSecret to be used with the PubSub+ container image registry | Undefined, meaning no ImagePullSecret used |
-| `securityContext.enabled` | `true` enables to using defined `fsGroup` and `runAsUser`. Set to `false` if `fsGroup` and `runAsUser` conflict with PodSecurityPolicy or Openshift SCC settings. | `true` meaning `fsGroup` and `runAsUser` used |
-| `securityContext.fsGroup` | Specifies `fsGroup` in pod security context | set to default non-zero id 1000002 |
-| `securityContext.runAsUser` | Specifies `runAsUser` in pod security context | set to default PubSub+ appuser id 1000001 |
-| `serviceAccount.create` | `true` will create a service account dedicated to the deployment in the namespace | `true` |
-| `serviceAccount.name` | Refer to https://helm.sh/docs/topics/chart_best_practices/rbac/#using-rbac-resources | Undefined |
-| `tls.enabled` | Enable to use TLS to access exposed broker services | `false` (not enabled) |
-| `tls.serverCertificatesSecret` | Name of the Kubernetes Secret that contains the certificates - required if TLS is enabled | Undefined |
-| `tls.certFilename` | Name of the Certificate file in the `serverCertificatesSecret` | `tls.crt` |
-| `tls.certKeyFilename` | Name of the Key file in the `serverCertificatesSecret` | `tls.key` |
-| `service.type` | How to expose the service: options include ClusterIP, NodePort, LoadBalancer | `LoadBalancer` |
-| `service.annotations` | service.annotations allows to add provider-specific service annotations | Undefined |
-| `service.ports` | Define PubSub+ service ports exposed. servicePorts are external, mapping to cluster-local pod containerPorts | initial set of frequently used ports, refer to values.yaml |
-| `storage.persistent` | `false` to use ephemeral storage at pod level; `true` to request persistent storage through a StorageClass | `true`, false is not recommended for production use |
-| `storage.slow` | `true` to indicate slow storage used, e.g. for NFS. | `false` |
-| `storage.customVolumeMount` | customVolumeMount can be used to specify a YAML fragment how the data volume should be mounted instead of using a storage class. | Undefined |
-| `storage.useStorageClass` | Name of the StorageClass to be used to request persistent storage volumes | Undefined, meaning to use the "default" StorageClass for the Kubernetes cluster |
-| `storage.size` | Size of the persistent storage to be used; Refer to the Solace documentation for storage configuration requirements | `30Gi` |
-
-
-
+# Solace PubSub+ Software Event Broker - Helm Chart
+
+The [Solace PubSub+ Platform](https://solace.com/products/platform/)'s [software event broker](https://solace.com/products/event-broker/software/) efficiently streams event-driven information between applications, IoT devices and user interfaces running in cloud, on-premises, and hybrid environments using open APIs and protocols like AMQP, JMS, MQTT, REST and WebSocket. It can be installed into a variety of public and private clouds, PaaS, and on-premises environments, and brokers in multiple locations can be linked together in an [event mesh](https://solace.com/what-is-an-event-mesh/) to dynamically share events across the distributed enterprise.
+
+## Overview
+
+This chart bootstraps a single-node or HA deployment of a [Solace PubSub+ Software Event Broker](//solace.com/products/event-broker/software/) on a [Kubernetes](//kubernetes.io) cluster using the [Helm](//helm.sh) package manager.
+
+Detailed documentation is provided in the [Solace PubSub+ Software Event Broker on Kubernetes Documentation](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md).
+
+## Prerequisites
+
+* Kubernetes 1.10 or later platform with adequate [CPU and memory](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#cpu-and-memory-requirements) and [storage resources](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#disk-storage) for the targeted scaling tier requirements
+* Helm package manager v3 client installed
+* If using a private container image registry, load the PubSub+ Software Event Broker container image and for signed images create an image pull secret
+* With persistent storage enabled (see in [Configuration](#config-storageclass)):
+ * Specify a storage class unless using a default storage class in your Kubernetes cluster
+
+Also review additional [deployment considerations](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#pubsub-software-event-broker-deployment-considerations).
+
+## Create a deployment
+
+```bash
+helm repo add solacecharts https://solaceproducts.github.io/pubsubplus-kubernetes-quickstart/helm-charts
+helm install my-release solacecharts/pubsubplus
+```
+
+> Note: it is recommended that the release name not exceed 28 characters
+
+## Use a deployment
+
+Obtain information about the deployment and services:
+
+```bash
+helm status my-release
+```
+
+Refer to the detailed PubSub+ Kubernetes documentation for:
+* [Validating the deployment](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#validating-the-deployment); or
+* [Troubleshooting](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#troubleshooting)
+* [Modifying or Upgrading](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#modifying-or-upgrading-a-deployment)
+
+## Delete a deployment
+
+```bash
+helm delete my-release
+kubectl get pvc | grep data-my-release
+# Delete any PVCs related to my-release
+```
+**Important:** Be sure to delete existing PVCs if reusing the same deployment name, to ensure a clean new deployment.
+
+## Configuration
+
+The following table lists the configurable parameters of the PubSub+ chart and their default values. For a detailed discussion refer to the [Deployment Considerations](//github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/docs/PubSubPlusK8SDeployment.md#pubsub-helm-chart-deployment-considerations) in the PubSub+ Kubernetes documentation.
+
+There are several ways to customize the deployment:
+
+- Override default values using the `--set key=value[,key=value]` argument to `helm install`. For example,
+```bash
+helm install my-release \
+ --set solace.redundancy=true,solace.usernameAdminPassword=secretpassword \
+ solacecharts/pubsubplus
+```
+
+- Another option is to create a YAML file containing the values to override and pass that to Helm:
+```bash
+# Create file
+echo "# Overrides:
+solace:
+ redundancy: true
+ usernameAdminPassword: secretpassword" > my-values.yaml
+# Now use the file:
+helm install my-release -f my-values.yaml solacecharts/pubsubplus
+```
+> Note: as an alternative to creating a new file you can [download](https://raw.githubusercontent.com/SolaceProducts/pubsubplus-kubernetes-quickstart/master/pubsubplus/values.yaml) the `values.yaml` file with default values and edit that for overrides.
+
+For more ways to override default chart values, refer to [Customizing the Helm Chart Before Installing](//helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
+
+| Parameter | Description | Default |
+| ------------------------------ | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- |
+| `nameOverride` | Kubernetes objects will be named as `-nameOverride` | Undefined, default naming is `-` |
+| `fullnameOverride` | Kubernetes objects will be named as `fullnameOverride` | Undefined, default naming is `-` |
+| `solace.redundancy` | `false` will create a single-node non-HA deployment; `true` will create an HA deployment with Primary, Backup and Monitor nodes | `false` |
+| `solace.size` | Event broker simple vertical scaling by number of client connections. **Ignored** if `solace.systemScaling` is set. Options: `dev` (requires minimum resources but no guaranteed performance), `prod100`, `prod1k`, `prod10k`, `prod100k`, `prod200k`. | `prod100` |
+| `solace.systemScaling.*` | Event broker fine-grained vertical scaling definition. If defined, all sub-settings must be provided and these settings will **override** `solace.size`. For scaling documentation, look for "system scaling" at [docs.solace.com](https://docs.solace.com/Search.htm?q=system%20scaling). Use the [online calculator](https://docs.solace.com/Assistance-Tools/Resource-Calculator/pubsubplus-resource-calculator.html) to determine CPU, Memory and Storage requirements for "Container (messaging)" type. `maxConnections`: max supported number of client connections `maxQueueMessages`: max number of queue messages, in millions of messages `maxSpoolUsage`: max Spool Usage, in MB. Also ensure adequate storage.size parameter, use the calculator `cpu`: CPUs in cores `memory`: host Virtual Memory, in MiB | Undefined |
+| `solace.podModifierEnabled` | Enables modifying (reducing) CPU and memory resources for Monitoring nodes in an HA deployment. Also requires the ["solace-pod-modifier" Kubernetes admission plugin](https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart/blob/master/solace-pod-modifier-admission-plugin/README.md#how-to-use) deployed to work. | Undefined, meaning not enabled. |
+| `solace.usernameAdminPassword` | The password for the "admin" management user. Will autogenerate it if not provided. **Important:** refer to the information from `helm status` on how to retrieve it and use it for `helm upgrade`. | Undefined, meaning autogenerate |
+| `solace.timezone` | Timezone setting for the PubSub+ container. Valid values are tz database time zone names. | Undefined, default is UTC |
+| `solace.extraEnvVars` | List of extra environment variables to be added to the PubSub+ container. A primary use case is to specify [configuration keys](https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/Docker-Tasks/Config-SW-Broker-Container-Cfg-Keys.htm). Important: env variables defined here will not override the ones defined in solaceConfigMap. | Undefined |
+| `solace.extraEnvVarsCM` | The name of an existing ConfigMap containing extra environment variables | Undefined |
+| `solace.extraEnvVarsSecret` | The name of an existing Secret containing extra environment variables (in case of sensitive data) | Undefined |
+| `image.repository` | The image repo name and path to the PubSub+ container image | `solace/solace-pubsub-standard` |
+| `image.tag` | The Solace container image tag. It is recommended to specify an explicit tag for production use. For possible tags, refer to the [Solace Docker Hub repo](https://hub.docker.com/r/solace/solace-pubsub-standard/tags) | `latest` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `image.pullSecretName` | Name of the ImagePullSecret to be used with the PubSub+ container image registry | Undefined, meaning no ImagePullSecret used |
+| `securityContext.enabled` | `true` enables using the defined `fsGroup` and `runAsUser`. Set to `false` if `fsGroup` and `runAsUser` conflict with PodSecurityPolicy or Openshift SCC settings. | `true` meaning `fsGroup` and `runAsUser` used |
+| `securityContext.fsGroup` | Specifies `fsGroup` in pod security context | set to default non-zero id 1000002 |
+| `securityContext.runAsUser` | Specifies `runAsUser` in pod security context | set to default PubSub+ appuser id 1000001 |
+| `serviceAccount.create` | `true` will create a service account dedicated to the deployment in the namespace | `true` |
+| `serviceAccount.name` | Refer to https://helm.sh/docs/topics/chart_best_practices/rbac/#using-rbac-resources | Undefined |
+| `tls.enabled` | Enable to use TLS to access exposed broker services | `false` (not enabled) |
+| `tls.serverCertificatesSecret` | Name of the Kubernetes Secret that contains the certificates - required if TLS is enabled | Undefined |
+| `tls.certFilename` | Name of the Certificate file in the `serverCertificatesSecret` | `tls.crt` |
+| `tls.certKeyFilename` | Name of the Key file in the `serverCertificatesSecret` | `tls.key` |
+| `service.type` | How to expose the service: options include ClusterIP, NodePort, LoadBalancer | `LoadBalancer` |
+| `service.annotations` | service.annotations allows to add provider-specific service annotations | Undefined |
+| `service.ports` | Define PubSub+ service ports exposed. servicePorts are external, mapping to cluster-local pod containerPorts | initial set of frequently used ports, refer to values.yaml |
+| `storage.persistent` | `false` to use ephemeral storage at pod level; `true` to request persistent storage through a StorageClass | `true`, false is not recommended for production use |
+| `storage.slow` | `true` to indicate slow storage used, e.g. for NFS. | `false` |
+| `storage.customVolumeMount` | customVolumeMount can be used to specify a YAML fragment how the data volume should be mounted instead of using a storage class. | Undefined |
+| `storage.useStorageClass` | Name of the StorageClass to be used to request persistent storage volumes | Undefined, meaning to use the "default" StorageClass for the Kubernetes cluster |
+| `storage.size` | Size of the persistent storage to be used; Refer to the Solace documentation and [online calculator](https://docs.solace.com/Assistance-Tools/Resource-Calculator/pubsubplus-resource-calculator.html) for storage size requirements | `30Gi` |
+| `storage.monitorStorageSize` | If provided this will create and assign the minimum recommended storage to Monitor pods. For initial deployments only. | `1500M` |
+| `storage.useStorageGroup` | `true` to use a single mount point storage-group, as recommended from PubSub+ version 9.12. Undefined or `false` is legacy behavior. Note: legacy mount still works for newer versions but may be deprecated in the future. | Undefined |
+
+
+
diff --git a/pubsubplus/templates/NOTES.txt b/pubsubplus/templates/NOTES.txt
index 0313237a..44e3141b 100644
--- a/pubsubplus/templates/NOTES.txt
+++ b/pubsubplus/templates/NOTES.txt
@@ -1,89 +1,98 @@
-
-== Check Solace PubSub+ deployment progress ==
-Deployment is complete when a PubSub+ pod representing an active event broker node's label reports "active=true".
-Watch progress by running:
- kubectl get pods --namespace {{ .Release.Namespace }} --show-labels -w | grep {{ template "solace.fullname" . }}
-
-For troubleshooting, refer to ***TroubleShooting.md***
-
-== TLS support ==
-{{- if not .Values.tls.enabled }}
-TLS has not been enabled for this deployment.
-{{- else }}
-TLS is enabled, using secret {{ .Values.tls.serverCertificatesSecret }} for server certificates configuration.
-{{- end }}
-
-== Admin credentials and access ==
-{{- if not .Values.solace.usernameAdminPassword }}
-*********************************************************************
-* An admin password was not specified and has been auto-generated.
-* You must retrieve it and provide it as value override
-* if using Helm upgrade otherwise your cluster will become unusable.
-*********************************************************************
-
-{{- end }}
- Username : admin
- Admin password : echo `kubectl get secret --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }}-secrets -o jsonpath="{.data.username_admin_password}" | base64 --decode`
- Use the "semp" service address to access the management API via browser or a REST tool, see Services access below.
-
-== Image used ==
-{{ .Values.image.repository }}:{{ .Values.image.tag }}
-
-== Storage used ==
-{{- if and ( .Values.storage.persistent ) ( .Values.storage.useStorageClass ) }}
-Using persistent volumes via dynamic provisioning, ensure specified StorageClass exists: `kubectl get sc {{ .Values.storage.useStorageClass }}`
-{{- else if .Values.storage.persistent}}
-Using persistent volumes via dynamic provisioning with the "default" StorageClass, ensure it exists: `kubectl get sc | grep default`
-{{- end }}
-{{- if and ( not .Values.storage.persistent ) ( not .Values.storage.hostPath ) ( not .Values.storage.existingVolume ) }}
-*******************************************************************************
-* This deployment is using pod-local ephemeral storage.
-* Note that any configuration and stored messages will be lost at pod restart.
-*******************************************************************************
-For production purposes it is recommended to use persistent storage.
-{{- end }}
-
-== Performance and resource requirements ==
-{{- if contains "dev" .Values.solace.size }}
-This is a minimum footprint deployment for development purposes. For guaranteed performance, specify a different solace.size value.
-{{- else }}
-The requested connection scaling tier for this deployment is: max {{ substr 4 10 .Values.solace.size }} connections.
-{{- end }}
-Following resources have been requested per PubSub+ pod:
- echo `kubectl get statefulset --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="Minimum resources: {.spec.template.spec.containers[0].resources.requests}"`
-
-== Services access ==
-To access services from pods within the k8s cluster, use these addresses:
-
- echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t{{ template "solace.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{.port}\n"`
-
-To access from outside the k8s cluster, perform the following steps.
-
-{{- if contains "NodePort" .Values.service.type }}
-
-Obtain the NodePort IP and service ports:
-
- export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}"); echo $NODE_IP
- # Use following ports with any of the NodeIPs
- echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t:{.nodePort}\n"`
-
-{{- else if contains "LoadBalancer" .Values.service.type }}
-
-Obtain the LoadBalancer IP and the service addresses:
-NOTE: At initial deployment it may take a few minutes for the LoadBalancer IP to be available.
- Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "solace.fullname" . }}'
-
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}"); echo SERVICE_IP=$SERVICE_IP
- # Ensure valid SERVICE_IP is returned:
- echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t$SERVICE_IP:{.port}\n"`
-
-{{- else if contains "ClusterIP" .Values.service.type }}
-
-NOTE: The specified k8s service type for this deployment is "ClusterIP" and it is not exposing services externally.
-
-For local testing purposes you can use port-forward in a background process to map pod ports to local host, then use these service addresses:
-
- kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "solace.fullname" . }} $(echo `kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.targetPort}:{.port} "`) &
- echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t127.0.0.1:{.targetPort}\n"`
-
-{{- end }}
+
+== Check Solace PubSub+ deployment progress ==
+Deployment is complete when a PubSub+ pod representing an active event broker node's label reports "active=true".
+Watch progress by running:
+ kubectl get pods --namespace {{ .Release.Namespace }} --show-labels -w | grep {{ template "solace.fullname" . }}
+
+For troubleshooting, refer to ***TroubleShooting.md***
+
+== TLS support ==
+{{- if not .Values.tls.enabled }}
+TLS has not been enabled for this deployment.
+{{- else }}
+TLS is enabled, using secret {{ .Values.tls.serverCertificatesSecret }} for server certificates configuration.
+{{- end }}
+
+== Admin credentials and access ==
+{{- if not .Values.solace.usernameAdminPassword }}
+*********************************************************************
+* An admin password was not specified and has been auto-generated.
+* You must retrieve it and provide it as value override
+* if using Helm upgrade otherwise your cluster will become unusable.
+*********************************************************************
+
+{{- end }}
+ Username : admin
+ Admin password : echo `kubectl get secret --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }}-secrets -o jsonpath="{.data.username_admin_password}" | base64 --decode`
+ Use the "semp" service address to access the management API via browser or a REST tool, see Services access below.
+
+== Image used ==
+{{ .Values.image.repository }}:{{ .Values.image.tag }}
+
+== Storage used ==
+{{- if and ( .Values.storage.persistent ) ( .Values.storage.useStorageClass ) }}
+Using persistent volumes via dynamic provisioning, ensure specified StorageClass exists: `kubectl get sc {{ .Values.storage.useStorageClass }}`
+{{- else if .Values.storage.persistent}}
+Using persistent volumes via dynamic provisioning with the "default" StorageClass, ensure it exists: `kubectl get sc | grep default`
+{{- end }}
+{{- if and ( not .Values.storage.persistent ) ( not .Values.storage.hostPath ) ( not .Values.storage.existingVolume ) }}
+*******************************************************************************
+* This deployment is using pod-local ephemeral storage.
+* Note that any configuration and stored messages will be lost at pod restart.
+*******************************************************************************
+For production purposes it is recommended to use persistent storage.
+{{- end }}
+
+== Performance and resource requirements ==
+{{- if .Values.solace.systemScaling }}
+Max supported number of client connections: {{ .Values.solace.systemScaling.maxConnections }}
+Max number of queue messages, in millions of messages: {{ .Values.solace.systemScaling.maxQueueMessages }}
+Max spool usage, in MB: {{ .Values.solace.systemScaling.maxSpoolUsage }}
+Requested cpu, in cores: {{ .Values.solace.systemScaling.cpu }}
+Requested memory: {{ .Values.solace.systemScaling.memory }}
+Requested storage: {{ .Values.storage.size }}
+{{- else }}
+{{- if contains "dev" .Values.solace.size }}
+This is a minimum footprint deployment for development purposes. For guaranteed performance, specify a different solace.size value.
+{{- else }}
+The requested connection scaling tier for this deployment is: max {{ substr 4 10 .Values.solace.size }} connections.
+{{- end }}
+Following resources have been requested per PubSub+ pod:
+ echo `kubectl get statefulset --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="Minimum resources: {.spec.template.spec.containers[0].resources.requests}"`
+{{- end }}
+
+== Services access ==
+To access services from pods within the k8s cluster, use these addresses:
+
+ echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t{{ template "solace.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{.port}\n"`
+
+To access from outside the k8s cluster, perform the following steps.
+
+{{- if contains "NodePort" .Values.service.type }}
+
+Obtain the NodePort IP and service ports:
+
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}"); echo $NODE_IP
+ # Use following ports with any of the NodeIPs
+ echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t:{.nodePort}\n"`
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+Obtain the LoadBalancer IP and the service addresses:
+NOTE: At initial deployment it may take a few minutes for the LoadBalancer IP to be available.
+ Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "solace.fullname" . }}'
+
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}"); echo SERVICE_IP=$SERVICE_IP
+ # Ensure valid SERVICE_IP is returned:
+ echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t$SERVICE_IP:{.port}\n"`
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+NOTE: The specified k8s service type for this deployment is "ClusterIP" and it is not exposing services externally.
+
+For local testing purposes you can use port-forward in a background process to map pod ports to local host, then use these service addresses:
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "solace.fullname" . }} $(echo `kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.targetPort}:{.port} "`) &
+ echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t127.0.0.1:{.targetPort}\n"`
+
+{{- end }}
diff --git a/pubsubplus/templates/_helpers.tpl b/pubsubplus/templates/_helpers.tpl
index 4e3ea9ff..d90e823b 100644
--- a/pubsubplus/templates/_helpers.tpl
+++ b/pubsubplus/templates/_helpers.tpl
@@ -1,29 +1,29 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "solace.name" -}}
- {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{/*
-Create a default fully qualified app name.
-We truncate at 53 chars because some Kubernetes name fields are limited (by the DNS naming spec).
-*/}}
-{{- define "solace.fullname" -}}
- {{- if .Values.fullnameOverride -}}
- {{- .Values.fullnameOverride | trunc 53 | trimSuffix "-" -}}
- {{- else -}}
- {{- $name := default .Chart.Name .Values.nameOverride -}}
- {{- printf "%s-%s" .Release.Name $name | trunc 53 | trimSuffix "-" -}}
- {{- end -}}
-{{- end -}}
-{{/*
-Return the name of the service account to use
-*/}}
-{{- define "solace.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create -}}
- {{ default ( cat (include "solace.fullname" .) "-sa" | nospace ) .Values.serviceAccount.name }}
-{{- else -}}
- {{ default "default" .Values.serviceAccount.name }}
-{{- end -}}
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "solace.name" -}}
+ {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{/*
+Create a default fully qualified app name.
+We truncate at 53 chars because some Kubernetes name fields are limited (by the DNS naming spec).
+*/}}
+{{- define "solace.fullname" -}}
+ {{- if .Values.fullnameOverride -}}
+ {{- .Values.fullnameOverride | trunc 53 | trimSuffix "-" -}}
+ {{- else -}}
+ {{- $name := default .Chart.Name .Values.nameOverride -}}
+ {{- printf "%s-%s" .Release.Name $name | trunc 53 | trimSuffix "-" -}}
+ {{- end -}}
+{{- end -}}
+{{/*
+Return the name of the service account to use
+*/}}
+{{- define "solace.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default ( cat (include "solace.fullname" .) "-sa" | nospace ) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
{{- end -}}
\ No newline at end of file
diff --git a/pubsubplus/templates/podModRbac.yaml b/pubsubplus/templates/podModRbac.yaml
index 031aae6c..b50fab6a 100644
--- a/pubsubplus/templates/podModRbac.yaml
+++ b/pubsubplus/templates/podModRbac.yaml
@@ -1,34 +1,34 @@
-# Deployment requires the capability of patching the pod to indicate active state for load balancing
-{{- if .Values.serviceAccount.create }}
-kind: ServiceAccount
-apiVersion: v1
-metadata:
- name: {{ template "solace.serviceAccountName" . }}
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
----
-{{- end }}
-kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: {{ template "solace.fullname" . }}-podtagupdater
-rules:
-- apiGroups: [""] # "" indicates the core API group
- resources: ["pods"]
- verbs: ["patch"]
----
-kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: {{ template "solace.fullname" . }}-serviceaccounts-to-podtagupdater
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: {{ template "solace.fullname" . }}-podtagupdater
-subjects:
-- kind: ServiceAccount
- name: {{ template "solace.serviceAccountName" . }}
-
+# Deployment requires the capability of patching the pod to indicate active state for load balancing
+{{- if .Values.serviceAccount.create }}
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+ name: {{ template "solace.serviceAccountName" . }}
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+---
+{{- end }}
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "solace.fullname" . }}-podtagupdater
+rules:
+- apiGroups: [""] # "" indicates the core API group
+ resources: ["pods"]
+ verbs: ["patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ template "solace.fullname" . }}-serviceaccounts-to-podtagupdater
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ template "solace.fullname" . }}-podtagupdater
+subjects:
+- kind: ServiceAccount
+ name: {{ template "solace.serviceAccountName" . }}
+
diff --git a/pubsubplus/templates/secret.yaml b/pubsubplus/templates/secret.yaml
index 7859518a..d83b6750 100644
--- a/pubsubplus/templates/secret.yaml
+++ b/pubsubplus/templates/secret.yaml
@@ -1,23 +1,23 @@
-{{- $secretBase := include "solace.fullname" . }}
-{{- $secretName := printf "%s-%s" $secretBase "secrets" }}
-{{- $adminPasswordValue := (randAlpha 10) | b64enc | quote }}
-{{- $secret := (lookup "v1" "Secret" .Release.Namespace $secretName) }}
-{{- if $secret }}
-{{- $adminPasswordValue = index $secret.data "username_admin_password" }}
-{{- end -}}
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ $secretName }}
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
-type: Opaque
-data:
-{{ if .Values.solace.usernameAdminPassword }}
- username_admin_password: {{ .Values.solace.usernameAdminPassword | b64enc | quote }}
-{{ else }}
- username_admin_password: {{ $adminPasswordValue}}
-{{ end }}
+{{- $secretBase := include "solace.fullname" . }}
+{{- $secretName := printf "%s-%s" $secretBase "secrets" }}
+{{- $adminPasswordValue := (randAlpha 10) | b64enc | quote }}
+{{- $secret := (lookup "v1" "Secret" .Release.Namespace $secretName) }}
+{{- if $secret }}
+{{- $adminPasswordValue = index $secret.data "username_admin_password" }}
+{{- end -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secretName }}
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+type: Opaque
+data:
+{{ if .Values.solace.usernameAdminPassword }}
+ username_admin_password: {{ .Values.solace.usernameAdminPassword | b64enc | quote }}
+{{ else }}
+ username_admin_password: {{ $adminPasswordValue}}
+{{ end }}
diff --git a/pubsubplus/templates/service-discovery.yaml b/pubsubplus/templates/service-discovery.yaml
index 6151a95f..a4a8339d 100644
--- a/pubsubplus/templates/service-discovery.yaml
+++ b/pubsubplus/templates/service-discovery.yaml
@@ -1,30 +1,30 @@
-{{- if .Values.solace.redundancy }}
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ template "solace.fullname" . }}-discovery
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- annotations:
- service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
-spec:
- ports:
- - port: 8080
- name: tcp-semp
- - port: 8741
- name: tcp-ha-mate-link
- - port: 8300
- name: tcp-ha-conf-sync0
- - port: 8301
- name: tcp-ha-conf-sync1
- - port: 8302
- name: tcp-ha-conf-sync2
- clusterIP: None
- selector:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- publishNotReadyAddresses: true
-{{- end }}
+{{- if .Values.solace.redundancy }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "solace.fullname" . }}-discovery
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ annotations:
+ service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+ ports:
+ - port: 8080
+ name: tcp-semp
+ - port: 8741
+ name: tcp-ha-mate-link
+ - port: 8300
+ name: tcp-ha-conf-sync0
+ - port: 8301
+ name: tcp-ha-conf-sync1
+ - port: 8302
+ name: tcp-ha-conf-sync2
+ clusterIP: None
+ selector:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ publishNotReadyAddresses: true
+{{- end }}
diff --git a/pubsubplus/templates/service.yaml b/pubsubplus/templates/service.yaml
index 33e8929f..ee77e12d 100644
--- a/pubsubplus/templates/service.yaml
+++ b/pubsubplus/templates/service.yaml
@@ -1,27 +1,27 @@
-# Load Service part of template
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ template "solace.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
-{{- if .Values.service.annotations }}
- annotations:
-{{ toYaml .Values.service.annotations | indent 4 }}
-{{- end }}
-spec:
- type: {{.Values.service.type | default "LoadBalancer"}}
- ports:
- {{- range $item := .Values.service.ports }}
- - port: {{ $item.servicePort }}
- targetPort: {{ $item.containerPort }}
- protocol: {{ $item.protocol }}
- name: {{ $item.name }}
- {{- end}}
- selector:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- active: "true"
+# Load Service part of template
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "solace.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+{{- if .Values.service.annotations }}
+ annotations:
+{{ toYaml .Values.service.annotations | indent 4 }}
+{{- end }}
+spec:
+ type: {{.Values.service.type | default "LoadBalancer"}}
+ ports:
+ {{- range $item := .Values.service.ports }}
+ - port: {{ $item.servicePort }}
+ targetPort: {{ $item.containerPort }}
+ protocol: {{ $item.protocol }}
+ name: {{ $item.name }}
+ {{- end}}
+ selector:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ active: "true"
diff --git a/pubsubplus/templates/solaceConfigMap.yaml b/pubsubplus/templates/solaceConfigMap.yaml
index c7f99a7c..f3dce76e 100644
--- a/pubsubplus/templates/solaceConfigMap.yaml
+++ b/pubsubplus/templates/solaceConfigMap.yaml
@@ -1,506 +1,510 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "solace.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
-data:
- init.sh: |-
- export username_admin_passwordfilepath="/mnt/disks/secrets/username_admin_password"
- export username_admin_globalaccesslevel=admin
- export service_ssh_port='2222'
- export service_webtransport_port='8008'
- export service_webtransport_tlsport='1443'
- export service_semp_tlsport='1943'
- export logging_debug_output=all
-{{- if eq .Values.solace.size "dev" }}
- export system_scaling_maxconnectioncount="100"
-{{- else if eq .Values.solace.size "prod100" }}
- export system_scaling_maxconnectioncount="100"
-{{- else if eq .Values.solace.size "prod1k" }}
- export system_scaling_maxconnectioncount="1000"
-{{- else if eq .Values.solace.size "prod10k" }}
- export system_scaling_maxconnectioncount="10000"
-{{- else if eq .Values.solace.size "prod100k" }}
- export system_scaling_maxconnectioncount="100000"
-{{- else if eq .Values.solace.size "prod200k" }}
- export system_scaling_maxconnectioncount="200000"
-{{- end }}
-{{- if and (.Values.tls) (.Values.tls.enabled) }}
- cat /mnt/disks/certs/server/{{.Values.tls.certFilename | default "tls.key"}} /mnt/disks/certs/server/{{.Values.tls.certKeyFilename | default "tls.crt"}} > /dev/shm/server.cert
- export tls_servercertificate_filepath="/dev/shm/server.cert"
-{{- end }}
-{{- if .Values.solace.redundancy }}
- # [TODO] KBARR not using correct method of finding ordinal until we bump min Kubernetes release above 1.8.1
- # https://github.com/kubernetes/kubernetes/issues/40651
- # node_ordinal=$(STATEFULSET_ORDINAL)
- IFS='-' read -ra host_array <<< $(hostname)
- node_ordinal=${host_array[-1]}
- if [[ ! -z `echo $STATEFULSET_NAMESPACE` ]]; then
- namespace=`echo $STATEFULSET_NAMESPACE`
- else
- namespace=default
- fi
- service={{ template "solace.fullname" . }}
- # Deal with the fact we cannot accept "-" in routre names
- service_name=$(echo ${service} | sed 's/-//g')
- export routername=$(echo $(hostname) | sed 's/-//g')
- export redundancy_enable=yes
- export configsync_enable=yes
- export redundancy_authentication_presharedkey_key=`cat /mnt/disks/secrets/username_admin_password | awk '{x=$0;for(i=length;i<51;i++)x=x "0";}END{print x}' | base64` # Right-pad with 0s to 50 length
- export service_redundancy_firstlistenport='8300'
- export redundancy_group_node_${service_name}0_nodetype=message_routing
- export redundancy_group_node_${service_name}0_connectvia=${service}-0.${service}-discovery.${namespace}.svc:${service_redundancy_firstlistenport}
- export redundancy_group_node_${service_name}1_nodetype=message_routing
- export redundancy_group_node_${service_name}1_connectvia=${service}-1.${service}-discovery.${namespace}.svc:${service_redundancy_firstlistenport}
- export redundancy_group_node_${service_name}2_nodetype=monitoring
- export redundancy_group_node_${service_name}2_connectvia=${service}-2.${service}-discovery.${namespace}.svc:${service_redundancy_firstlistenport}
-
- case ${node_ordinal} in
- 0)
- export nodetype=message_routing
- export redundancy_matelink_connectvia=${service}-1.${service}-discovery.${namespace}.svc
- export redundancy_activestandbyrole=primary
- ;;
- 1)
- export nodetype=message_routing
- export redundancy_matelink_connectvia=${service}-0.${service}-discovery.${namespace}.svc
- export redundancy_activestandbyrole=backup
- ;;
- 2)
- export nodetype=monitoring
- ;;
- esac
-{{- end }}
-
- startup-broker.sh: |-
- #!/bin/bash
- APP=`basename "$0"`
- IFS='-' read -ra host_array <<< $(hostname)
- node_ordinal=${host_array[-1]}
- echo "`date` INFO: ${APP}-Node ordinal: ${node_ordinal}"
- echo "`date` INFO: ${APP}-Waiting for management API to become available"
- password=`cat /mnt/disks/secrets/username_admin_password`
- loop_guard=60
- pause=10
- count=0
- while [ ${count} -lt ${loop_guard} ]; do
- if /mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 -t ; then
- break
- fi
- run_time=$((${count} * ${pause}))
- ((count++))
- echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Management API not yet accessible"
- sleep ${pause}
- done
- if [ ${count} -eq ${loop_guard} ]; then
- echo "`date` ERROR: ${APP}-Solace Management API never came up" >&2
- exit 1
- fi
-{{- if and (.Values.tls) (.Values.tls.enabled) }}
- rm /dev/shm/server.cert # remove as soon as possible
- cert_results=$(curl --write-out '%{http_code}' --silent --output /dev/null -k -X PATCH -u admin:${password} https://localhost:1943/SEMP/v2/config/ \
- -H "content-type: application/json" \
- -d "{\"tlsServerCertContent\":\"$(cat /mnt/disks/certs/server/{{.Values.tls.certFilename | default "tls.key"}} /mnt/disks/certs/server/{{.Values.tls.certKeyFilename | default "tls.crt"}} | awk '{printf "%s\\n", $0}')\"}")
- if [ "${cert_results}" != "200" ]; then
- echo "`date` ERROR: ${APP}-Unable to set the server certificate, exiting" >&2
- exit 1
- fi
- echo "`date` INFO: ${APP}-Server certificate has been configured"
-{{- end }}
-{{- if .Values.solace.redundancy }}
- # for non-monitor nodes setup redundancy and config-sync
- if [ "${node_ordinal}" != "2" ]; then
- resync_step=""
- role=""
- count=0
- while [ ${count} -lt ${loop_guard} ]; do
- role_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/redundancy/active-standby-role[text()]"`
- run_time=$((${count} * ${pause}))
- case "`echo ${role_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`" in
- "Primary")
- role="primary"
- break
- ;;
- "Backup")
- role="backup"
- break
- ;;
- esac
- ((count++))
- echo "`date` INFO: ${APP}-Waited ${run_time} seconds, got ${role_results} for this node's active-standby role"
- sleep ${pause}
- done
- if [ ${count} -eq ${loop_guard} ]; then
- echo "`date` ERROR: ${APP}-Could not determine this node's active-standby role" >&2
- exit 1
- fi
- # Determine local activity
- count=0
- echo "`date` INFO: ${APP}-Management API is up, determined that this node's active-standby role is: ${role}"
- while [ ${count} -lt ${loop_guard} ]; do
- online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/redundancy/virtual-routers/${role}/status/activity[text()]"`
- local_activity=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- run_time=$((${count} * ${pause}))
- case "${local_activity}" in
- "Local Active")
- echo "`date` INFO: ${APP}-Node activity status is Local Active, after ${run_time} seconds"
- # We should only be here on new cluster create, if not likely a bug
- # Need to issue assert master to get back into sync"
- resync_step="assert-master"
- break
- ;;
- "Mate Active")
- echo "`date` INFO: ${APP}-Node activity status is Mate Active, after ${run_time} seconds"
- # This is normal state if we are backup or recreated later on
- # will issue a resync master to get back into sync
- resync_step="resync-master"
- break
- ;;
- esac
- ((count++))
- echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Local activity state is: ${local_activity}"
- sleep ${pause}
- done
- if [ ${count} -eq ${loop_guard} ]; then
- echo "`date` ERROR: ${APP}-Local activity state never become Local Active or Mate Active" >&2
- exit 1
- fi
- # If we need to assert master, then we need to wait for mate to reconcile
- if [ "${resync_step}" = "assert-master" ]; then
- count=0
- echo "`date` INFO: ${APP}-Waiting for mate activity state to be 'Standby'"
- while [ ${count} -lt ${loop_guard} ]; do
- online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/redundancy/virtual-routers/${role}/status/detail/priority-reported-by-mate/summary[text()]"`
- mate_activity=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- run_time=$((${count} * ${pause}))
- case "${mate_activity}" in
- "Standby")
- echo "`date` INFO: ${APP}-Activity state reported by mate is Standby, after ${run_time} seconds"
- break
- ;;
- esac
- ((count++))
- echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Mate activity state is: ${mate_activity}, not yet in sync"
- sleep ${pause}
- done
- if [ ${count} -eq ${loop_guard} ]; then
- echo "`date` ERROR: ${APP}-Mate not in sync, never reached Standby" >&2
- exit 1
- fi
- fi # if assert-master
- # Ensure Config-sync connection state is Connected before proceeding
- count=0
- echo "`date` INFO: ${APP}-Waiting for config-sync connected"
- while [ ${count} -lt ${loop_guard} ]; do
- online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/config-sync/status/client/connection-state"`
- connection_state=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- run_time=$((${count} * ${pause}))
- case "${connection_state}" in
- "Connected")
- echo "`date` INFO: ${APP}-Config-sync connection state is Connected, after ${run_time} seconds"
- break
- ;;
- esac
- ((count++))
- echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Config-sync connection state is: ${connection_state}, not yet in Connected"
- sleep ${pause}
- done
- if [ ${count} -eq ${loop_guard} ]; then
- echo "`date` ERROR: ${APP}-Config-sync connection state never reached Connected" >&2
- exit 1
- fi
- # Now can issue {resync_step} command
- echo "`date` INFO: ${APP}-Initiating ${resync_step}"
- /mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "<${resync_step}>${resync_step}>"
- /mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "<${resync_step}>*${resync_step}>"
- # Wait for config-sync results
- count=0
- echo "`date` INFO: ${APP}-Waiting for config-sync connected"
- while [ ${count} -lt ${loop_guard} ]; do
- online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/config-sync/status/oper-status"`
- confsyncstatus_results=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- run_time=$((${count} * ${pause}))
- case "${confsyncstatus_results}" in
- "Up")
- echo "`date` INFO: ${APP}-Config-sync is Up, after ${run_time} seconds"
- break
- ;;
- esac
- ((count++))
- echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Config-sync is: ${confsyncstatus_results}, not yet Up"
- sleep ${pause}
- done
- if [ ${count} -eq ${loop_guard} ]; then
- echo "`date` ERROR: ${APP}-Config-sync never reached state \"Up\"" >&2
- exit 1
- fi
- fi # if not monitor
-{{- end }}
- echo "`date` INFO: ${APP}-PubSub+ Event Broker bringup is complete for this node."
- exit 0
-
-
- readiness_check.sh: |-
- #!/bin/bash
- APP=`basename "$0"`
- LOG_FILE=/usr/sw/var/k8s_readiness_check.log # STDOUT/STDERR goes to k8s event logs but gets cleaned out eventually. This will also persist it.
- tail -n 1000 ${LOG_FILE} > ${LOG_FILE}.tmp; mv -f ${LOG_FILE}.tmp ${LOG_FILE} || : # Limit logs size
- exec > >(tee -a ${LOG_FILE}) 2>&1 # Setup logging
- FINAL_ACTIVITY_LOGGED_TRACKING_FILE=/tmp/final_activity_state_logged
-
- # Function to read Kubernetes metadata labels
- get_label () {
- # Params: $1 label name
- echo $(cat /etc/podinfo/labels | awk -F= '$1=="'${1}'"{print $2}' | xargs);
- }
-
- # Function to set Kubernetes metadata labels
- set_label () {
- # Params: $1 label name, $2 label set value
- #Prevent overdriving Kubernetes infra, don't set activity state to same as previous state
- previous_state=$(get_label "active")
- if [ "${2}" = "${previous_state}" ]; then
- #echo "`date` INFO: ${APP}-Current and Previous state match (${2}), not updating pod label"
- :
- else
- echo "`date` INFO: ${APP}-Updating pod label using K8s API from ${previous_state} to ${2}"
- echo "[{\"op\": \"add\", \"path\": \"/metadata/labels/${1}\", \"value\": \"${2}\" }]" > /tmp/patch_label.json
- K8S=https://kubernetes.default.svc.cluster.local:$KUBERNETES_SERVICE_PORT
- KUBE_TOKEN=$(&2
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- fi
- fi
- fi
- }
-
- # Main logic: note that there are no re-tries here, if check fails then return not ready.
-{{- if .Values.solace.redundancy }}
- # HA config
- IFS='-' read -ra host_array <<< $(hostname)
- node_ordinal=${host_array[-1]}
- password=`cat /mnt/disks/secrets/username_admin_password`
-
- # For update (includes SolOS upgrade) purposes, additional checks are required for readiness state when the pod has been started
- # This is an update if the LASTVERSION_FILE with K8s controller-revision-hash exists and contents differ from current value
- LASTVERSION_FILE=/var/lib/solace/var/lastConfigRevisionBeforeReboot
- if [ -f ${LASTVERSION_FILE} ] && [[ $(cat ${LASTVERSION_FILE}) != $(get_label "controller-revision-hash") ]] ; then
- echo "`date` INFO: ${APP}-Upgrade detected, running additional checks..."
- # Check redundancy
- results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/redundancy/redundancy-status"`
- redundancystatus_results=`echo ${results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- if [ "${redundancystatus_results}" != "Up" ]; then
- echo "`date` INFO: ${APP}-Redundancy state is not yet up."
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- fi
- # Additionally check config-sync status for non-monitoring nodes
- if [ "${node_ordinal}" != "2" ]; then
- results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/config-sync/status/oper-status"`
- confsyncstatus_results=`echo ${results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- if [ "${confsyncstatus_results}" != "Up" ]; then
- echo "`date` INFO: ${APP}-Config-sync state is not yet up."
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- fi
- fi
- fi
- # Record current version in LASTVERSION_FILE
- echo $(get_label "controller-revision-hash") > ${LASTVERSION_FILE}
- # For monitor node just check for 3 online nodes in group; active label will never be set
- if [ "${node_ordinal}" = "2" ]; then
- role_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -c "/rpc-reply/rpc/show/redundancy/group-node/status[text() = \"Online\"]"`
- if [[ ${role_results} != *""* ]]; then
- errorinfo=`echo ${results} | xmllint -xpath "string(returnInfo/errorInfo)" - 2>/dev/null` || errorinfo=
- echo "`date` INFO: ${APP}-Waiting for valid server status response, got ${errorinfo}"
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- fi
- nodes_online=`echo ${role_results} | xmllint -xpath "string(returnInfo/countSearchResult)" -`
- if [ "$nodes_online" -eq "3" ]; then
- if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
- echo "`date` INFO: ${APP}-All nodes online, monitor node is redundancy ready"
- touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
- fi
- exit 0
- else
- echo "`date` INFO: ${APP}-Monitor node is not redundancy ready, ${nodes_online} of 3 nodes online"
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- fi
- fi # End Monitor Node
- # For Primary or Backup nodes set both service readiness (active label) and k8s readiness (exit return value)
- health_result=`curl -s -o /dev/null -w "%{http_code}" http://localhost:5550/health-check/guaranteed-active`
- case "${health_result}" in
- "200")
- if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
- echo "`date` INFO: ${APP}-HA Event Broker health check reported 200, message spool is up"
- touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
- fi
- set_label "active" "true"
- exit 0
- ;;
- "503")
- if [[ $(get_label "active") = "true" ]]; then echo "`date` INFO: ${APP}-HA Event Broker health check reported 503"; fi
- set_label "active" "false"
- # Further check is required to determine readiness
- ;;
- *)
- echo "`date` WARN: ${APP}-HA Event Broker health check reported unexpected ${health_result}"
- set_label "active" "false"
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- esac
- # At this point analyzing readiness after health check returned 503 - checking if Event Broker is Standby
- case "${node_ordinal}" in
- "0")
- config_role="primary"
- ;;
- "1")
- config_role="backup"
- ;;
- esac
- online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
- -q "" \
- -v "/rpc-reply/rpc/show/redundancy/virtual-routers/${config_role}/status/activity[text()]"`
- local_activity=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
- case "${local_activity}" in
- "Mate Active")
- # Pass readiness check
- if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
- echo "`date` INFO: ${APP}-Redundancy is up and node is mate Active"
- touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
- fi
- exit 0
- ;;
- *)
- echo "`date` WARN: ${APP}-Health check returned 503 and local activity state is: ${local_activity}, failing readiness check."
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- ;;
- esac
-{{- else }}
- # nonHA config
- health_result=`curl -s -o /dev/null -w "%{http_code}" http://localhost:5550/health-check/guaranteed-active`
- case "${health_result}" in
- "200")
- if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
- echo "`date` INFO: ${APP}-nonHA Event Broker health check reported 200, message spool is up"
- touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
- fi
- set_label "active" "true"
- exit 0
- ;;
- "503")
- if [[ $(get_label "active") = "true" ]]; then echo "`date` INFO: ${APP}-nonHA Event Broker health check reported 503, message spool is down"; fi
- set_label "active" "false"
- # Fail readiness check
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- ;;
- *)
- echo "`date` WARN: ${APP}-nonHA Event Broker health check reported ${health_result}"
- set_label "active" "false"
- # Fail readiness check
- rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
- esac
-{{- end }}
- semp_query.sh: |-
- #!/bin/bash
- APP=`basename "$0"`
- OPTIND=1 # Reset in case getopts has been used previously in the shell.
- # Initialize our own variables:
- count_search=""
- name=""
- password=""
- query=""
- url=""
- value_search=""
- test_connection_only=false
- script_name=$0
- verbose=0
- while getopts "c:n:p:q:u:v:t" opt; do
- case "$opt" in
- c) count_search=$OPTARG
- ;;
- n) username=$OPTARG
- ;;
- p) password=$OPTARG
- ;;
- q) query=$OPTARG
- ;;
- u) url=$OPTARG
- ;;
- v) value_search=$OPTARG
- ;;
- t) test_connection_only=true
- ;;
- esac
- done
- shift $((OPTIND-1))
- [ "$1" = "--" ] && shift
- verbose=1
- #echo "`date` INFO: ${APP}-${script_name}: count_search=${count_search} ,username=${username} ,password=xxx query=${query} \
- # ,url=${url} ,value_search=${value_search} ,Leftovers: $@" >&2
- if [[ ${url} = "" || ${username} = "" || ${password} = "" ]]; then
- echo "`date` ERROR: ${APP}-${script_name}: url, username, password are madatory fields" >&2
- echo 'missing parameter'
- exit 1
- fi
- if [ "`curl --write-out '%{http_code}' --silent --output /dev/null -u ${username}:${password} ${url}/SEMP`" != "200" ] ; then
- echo "management host is not responding"
- exit 1
- fi
- if [ "$test_connection_only" = true ] ; then
- exit 0 # done here, connection is up
- fi
- query_response=`curl -sS -u ${username}:${password} ${url}/SEMP -d "${query}"`
- # Validate first char of response is "<", otherwise no hope of being valid xml
- if [[ ${query_response:0:1} != "<" ]] ; then
- echo "no valid xml returned"
- exit 1
- fi
- query_response_code=`echo $query_response | xmllint -xpath 'string(/rpc-reply/execute-result/@code)' -`
-
- if [[ -z ${query_response_code} && ${query_response_code} != "ok" ]]; then
- echo "query failed -${query_response_code}-"
- exit 1
- fi
- #echo "`date` INFO: ${APP}-${script_name}: query passed ${query_response_code}" >&2
- if [[ ! -z $value_search ]]; then
- value_result=`echo $query_response | xmllint -xpath "string($value_search)" -`
- echo "${value_result}"
- exit 0
- fi
- if [[ ! -z $count_search ]]; then
- count_line=`echo $query_response | xmllint -xpath "$count_search" -`
- count_string=`echo $count_search | cut -d '"' -f 2`
- count_result=`echo ${count_line} | tr "><" "\n" | grep -c ${count_string}`
- echo "${count_result}"
- exit 0
- fi
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "solace.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+data:
+ init.sh: |-
+ export username_admin_passwordfilepath="/mnt/disks/secrets/username_admin_password"
+ export username_admin_globalaccesslevel=admin
+ export service_ssh_port='2222'
+ export service_webtransport_port='8008'
+ export service_webtransport_tlsport='1443'
+ export service_semp_tlsport='1943'
+ export logging_debug_output=all
+{{- if .Values.solace.systemScaling }}
+ export system_scaling_maxconnectioncount={{ required "A valid maxConnections required!" .Values.solace.systemScaling.maxConnections | quote }}
+ export system_scaling_maxqueuemessagecount={{ required "A valid maxQueueMessages required!" .Values.solace.systemScaling.maxQueueMessages | quote }}
+ export messagespool_maxspoolusage={{ required "A valid maxSpoolUsage required!" .Values.solace.systemScaling.maxSpoolUsage | quote }}
+{{- else if eq .Values.solace.size "dev" }}
+ export system_scaling_maxconnectioncount="100"
+{{- else if eq .Values.solace.size "prod100" }}
+ export system_scaling_maxconnectioncount="100"
+{{- else if eq .Values.solace.size "prod1k" }}
+ export system_scaling_maxconnectioncount="1000"
+{{- else if eq .Values.solace.size "prod10k" }}
+ export system_scaling_maxconnectioncount="10000"
+{{- else if eq .Values.solace.size "prod100k" }}
+ export system_scaling_maxconnectioncount="100000"
+{{- else if eq .Values.solace.size "prod200k" }}
+ export system_scaling_maxconnectioncount="200000"
+{{- end }}
+{{- if and (.Values.tls) (.Values.tls.enabled) }}
+ cat /mnt/disks/certs/server/{{.Values.tls.certFilename | default "tls.key"}} /mnt/disks/certs/server/{{.Values.tls.certKeyFilename | default "tls.crt"}} > /dev/shm/server.cert
+ export tls_servercertificate_filepath="/dev/shm/server.cert"
+{{- end }}
+{{- if .Values.solace.redundancy }}
+ # [TODO] KBARR not using correct method of finding ordinal until we bump min Kubernetes release above 1.8.1
+ # https://github.com/kubernetes/kubernetes/issues/40651
+ # node_ordinal=$(STATEFULSET_ORDINAL)
+ IFS='-' read -ra host_array <<< $(hostname)
+ node_ordinal=${host_array[-1]}
+ if [[ ! -z `echo $STATEFULSET_NAMESPACE` ]]; then
+ namespace=`echo $STATEFULSET_NAMESPACE`
+ else
+ namespace=default
+ fi
+ service={{ template "solace.fullname" . }}
+ # Deal with the fact we cannot accept "-" in router names
+ service_name=$(echo ${service} | sed 's/-//g')
+ export routername=$(echo $(hostname) | sed 's/-//g')
+ export redundancy_enable=yes
+ export configsync_enable=yes
+ export redundancy_authentication_presharedkey_key=`cat /mnt/disks/secrets/username_admin_password | awk '{x=$0;for(i=length;i<51;i++)x=x "0";}END{print x}' | base64` # Right-pad with 0s to 50 length
+ export service_redundancy_firstlistenport='8300'
+ export redundancy_group_node_${service_name}0_nodetype=message_routing
+ export redundancy_group_node_${service_name}0_connectvia=${service}-0.${service}-discovery.${namespace}.svc:${service_redundancy_firstlistenport}
+ export redundancy_group_node_${service_name}1_nodetype=message_routing
+ export redundancy_group_node_${service_name}1_connectvia=${service}-1.${service}-discovery.${namespace}.svc:${service_redundancy_firstlistenport}
+ export redundancy_group_node_${service_name}2_nodetype=monitoring
+ export redundancy_group_node_${service_name}2_connectvia=${service}-2.${service}-discovery.${namespace}.svc:${service_redundancy_firstlistenport}
+
+ case ${node_ordinal} in
+ 0)
+ export nodetype=message_routing
+ export redundancy_matelink_connectvia=${service}-1.${service}-discovery.${namespace}.svc
+ export redundancy_activestandbyrole=primary
+ ;;
+ 1)
+ export nodetype=message_routing
+ export redundancy_matelink_connectvia=${service}-0.${service}-discovery.${namespace}.svc
+ export redundancy_activestandbyrole=backup
+ ;;
+ 2)
+ export nodetype=monitoring
+ ;;
+ esac
+{{- end }}
+
+ startup-broker.sh: |-
+ #!/bin/bash
+ APP=`basename "$0"`
+ IFS='-' read -ra host_array <<< $(hostname)
+ node_ordinal=${host_array[-1]}
+ echo "`date` INFO: ${APP}-Node ordinal: ${node_ordinal}"
+ echo "`date` INFO: ${APP}-Waiting for management API to become available"
+ password=`cat /mnt/disks/secrets/username_admin_password`
+ loop_guard=60
+ pause=10
+ count=0
+ while [ ${count} -lt ${loop_guard} ]; do
+ if /mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 -t ; then
+ break
+ fi
+ run_time=$((${count} * ${pause}))
+ ((count++))
+ echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Management API not yet accessible"
+ sleep ${pause}
+ done
+ if [ ${count} -eq ${loop_guard} ]; then
+ echo "`date` ERROR: ${APP}-Solace Management API never came up" >&2
+ exit 1
+ fi
+{{- if and (.Values.tls) (.Values.tls.enabled) }}
+ rm /dev/shm/server.cert # remove as soon as possible
+ cert_results=$(curl --write-out '%{http_code}' --silent --output /dev/null -k -X PATCH -u admin:${password} https://localhost:1943/SEMP/v2/config/ \
+ -H "content-type: application/json" \
+ -d "{\"tlsServerCertContent\":\"$(cat /mnt/disks/certs/server/{{.Values.tls.certFilename | default "tls.key"}} /mnt/disks/certs/server/{{.Values.tls.certKeyFilename | default "tls.crt"}} | awk '{printf "%s\\n", $0}')\"}")
+ if [ "${cert_results}" != "200" ]; then
+ echo "`date` ERROR: ${APP}-Unable to set the server certificate, exiting" >&2
+ exit 1
+ fi
+ echo "`date` INFO: ${APP}-Server certificate has been configured"
+{{- end }}
+{{- if .Values.solace.redundancy }}
+ # for non-monitor nodes setup redundancy and config-sync
+ if [ "${node_ordinal}" != "2" ]; then
+ resync_step=""
+ role=""
+ count=0
+ while [ ${count} -lt ${loop_guard} ]; do
+ role_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/redundancy/active-standby-role[text()]"`
+ run_time=$((${count} * ${pause}))
+ case "`echo ${role_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`" in
+ "Primary")
+ role="primary"
+ break
+ ;;
+ "Backup")
+ role="backup"
+ break
+ ;;
+ esac
+ ((count++))
+ echo "`date` INFO: ${APP}-Waited ${run_time} seconds, got ${role_results} for this node's active-standby role"
+ sleep ${pause}
+ done
+ if [ ${count} -eq ${loop_guard} ]; then
+ echo "`date` ERROR: ${APP}-Could not determine this node's active-standby role" >&2
+ exit 1
+ fi
+ # Determine local activity
+ count=0
+ echo "`date` INFO: ${APP}-Management API is up, determined that this node's active-standby role is: ${role}"
+ while [ ${count} -lt ${loop_guard} ]; do
+ online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/redundancy/virtual-routers/${role}/status/activity[text()]"`
+ local_activity=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ run_time=$((${count} * ${pause}))
+ case "${local_activity}" in
+ "Local Active")
+ echo "`date` INFO: ${APP}-Node activity status is Local Active, after ${run_time} seconds"
+ # We should only be here on new cluster create, if not likely a bug
+ # Need to issue assert master to get back into sync"
+ resync_step="assert-master"
+ break
+ ;;
+ "Mate Active")
+ echo "`date` INFO: ${APP}-Node activity status is Mate Active, after ${run_time} seconds"
+ # This is normal state if we are backup or recreated later on
+ # will issue a resync master to get back into sync
+ resync_step="resync-master"
+ break
+ ;;
+ esac
+ ((count++))
+ echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Local activity state is: ${local_activity}"
+ sleep ${pause}
+ done
+ if [ ${count} -eq ${loop_guard} ]; then
+ echo "`date` ERROR: ${APP}-Local activity state never become Local Active or Mate Active" >&2
+ exit 1
+ fi
+ # If we need to assert master, then we need to wait for mate to reconcile
+ if [ "${resync_step}" = "assert-master" ]; then
+ count=0
+ echo "`date` INFO: ${APP}-Waiting for mate activity state to be 'Standby'"
+ while [ ${count} -lt ${loop_guard} ]; do
+ online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/redundancy/virtual-routers/${role}/status/detail/priority-reported-by-mate/summary[text()]"`
+ mate_activity=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ run_time=$((${count} * ${pause}))
+ case "${mate_activity}" in
+ "Standby")
+ echo "`date` INFO: ${APP}-Activity state reported by mate is Standby, after ${run_time} seconds"
+ break
+ ;;
+ esac
+ ((count++))
+ echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Mate activity state is: ${mate_activity}, not yet in sync"
+ sleep ${pause}
+ done
+ if [ ${count} -eq ${loop_guard} ]; then
+ echo "`date` ERROR: ${APP}-Mate not in sync, never reached Standby" >&2
+ exit 1
+ fi
+ fi # if assert-master
+ # Ensure Config-sync connection state is Connected before proceeding
+ count=0
+ echo "`date` INFO: ${APP}-Waiting for config-sync connected"
+ while [ ${count} -lt ${loop_guard} ]; do
+ online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/config-sync/status/client/connection-state"`
+ connection_state=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ run_time=$((${count} * ${pause}))
+ case "${connection_state}" in
+ "Connected")
+ echo "`date` INFO: ${APP}-Config-sync connection state is Connected, after ${run_time} seconds"
+ break
+ ;;
+ esac
+ ((count++))
+ echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Config-sync connection state is: ${connection_state}, not yet in Connected"
+ sleep ${pause}
+ done
+ if [ ${count} -eq ${loop_guard} ]; then
+ echo "`date` ERROR: ${APP}-Config-sync connection state never reached Connected" >&2
+ exit 1
+ fi
+ # Now can issue {resync_step} command
+ echo "`date` INFO: ${APP}-Initiating ${resync_step}"
+ /mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "<${resync_step}>${resync_step}>"
+ /mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "<${resync_step}>*${resync_step}>"
+ # Wait for config-sync results
+ count=0
+ echo "`date` INFO: ${APP}-Waiting for config-sync connected"
+ while [ ${count} -lt ${loop_guard} ]; do
+ online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/config-sync/status/oper-status"`
+ confsyncstatus_results=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ run_time=$((${count} * ${pause}))
+ case "${confsyncstatus_results}" in
+ "Up")
+ echo "`date` INFO: ${APP}-Config-sync is Up, after ${run_time} seconds"
+ break
+ ;;
+ esac
+ ((count++))
+ echo "`date` INFO: ${APP}-Waited ${run_time} seconds, Config-sync is: ${confsyncstatus_results}, not yet Up"
+ sleep ${pause}
+ done
+ if [ ${count} -eq ${loop_guard} ]; then
+ echo "`date` ERROR: ${APP}-Config-sync never reached state \"Up\"" >&2
+ exit 1
+ fi
+ fi # if not monitor
+{{- end }}
+ echo "`date` INFO: ${APP}-PubSub+ Event Broker bringup is complete for this node."
+ exit 0
+
+
+ readiness_check.sh: |-
+ #!/bin/bash
+ APP=`basename "$0"`
+ LOG_FILE=/usr/sw/var/k8s_readiness_check.log # STDOUT/STDERR goes to k8s event logs but gets cleaned out eventually. This will also persist it.
+ tail -n 1000 ${LOG_FILE} > ${LOG_FILE}.tmp; mv -f ${LOG_FILE}.tmp ${LOG_FILE} || : # Limit logs size
+ exec > >(tee -a ${LOG_FILE}) 2>&1 # Setup logging
+ FINAL_ACTIVITY_LOGGED_TRACKING_FILE=/tmp/final_activity_state_logged
+
+ # Function to read Kubernetes metadata labels
+ get_label () {
+ # Params: $1 label name
+ echo $(cat /etc/podinfo/labels | awk -F= '$1=="'${1}'"{print $2}' | xargs);
+ }
+
+ # Function to set Kubernetes metadata labels
+ set_label () {
+ # Params: $1 label name, $2 label set value
+ #Prevent overdriving Kubernetes infra, don't set activity state to same as previous state
+ previous_state=$(get_label "active")
+ if [ "${2}" = "${previous_state}" ]; then
+ #echo "`date` INFO: ${APP}-Current and Previous state match (${2}), not updating pod label"
+ :
+ else
+ echo "`date` INFO: ${APP}-Updating pod label using K8s API from ${previous_state} to ${2}"
+ echo "[{\"op\": \"add\", \"path\": \"/metadata/labels/${1}\", \"value\": \"${2}\" }]" > /tmp/patch_label.json
+ K8S=https://kubernetes.default.svc.cluster.local:$KUBERNETES_SERVICE_PORT
+ KUBE_TOKEN=$(</var/run/secrets/kubernetes.io/serviceaccount/token)
+ CACERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ NAMESPACE=$(</var/run/secrets/kubernetes.io/serviceaccount/namespace)
+ if ! curl -sS --output /dev/null --cacert $CACERT --connect-timeout 5 --request PATCH --data "$(cat /tmp/patch_label.json)" -H "Authorization: Bearer $KUBE_TOKEN" -H "Content-Type:application/json-patch+json" $K8S/api/v1/namespaces/$NAMESPACE/pods/$HOSTNAME ; then
+ # Label update didn't work this way, fall back to alternative legacy method to update label
+ if ! kubectl patch pod $(hostname) --type json -p "$(cat /tmp/patch_label.json)" ; then
+ echo "`date` ERROR: ${APP}-Unable to update pod label, check access from pod to K8s API or RBAC authorization" >&2
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ fi
+ fi
+ fi
+ }
+
+ # Main logic: note that there are no re-tries here, if check fails then return not ready.
+{{- if .Values.solace.redundancy }}
+ # HA config
+ IFS='-' read -ra host_array <<< $(hostname)
+ node_ordinal=${host_array[-1]}
+ password=`cat /mnt/disks/secrets/username_admin_password`
+
+ # For update (includes SolOS upgrade) purposes, additional checks are required for readiness state when the pod has been started
+ # This is an update if the LASTVERSION_FILE with K8s controller-revision-hash exists and contents differ from current value
+ LASTVERSION_FILE=/var/lib/solace/var/lastConfigRevisionBeforeReboot
+ if [ -f ${LASTVERSION_FILE} ] && [[ $(cat ${LASTVERSION_FILE}) != $(get_label "controller-revision-hash") ]] ; then
+ echo "`date` INFO: ${APP}-Upgrade detected, running additional checks..."
+ # Check redundancy
+ results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/redundancy/redundancy-status"`
+ redundancystatus_results=`echo ${results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ if [ "${redundancystatus_results}" != "Up" ]; then
+ echo "`date` INFO: ${APP}-Redundancy state is not yet up."
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ fi
+ # Additionally check config-sync status for non-monitoring nodes
+ if [ "${node_ordinal}" != "2" ]; then
+ results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/config-sync/status/oper-status"`
+ confsyncstatus_results=`echo ${results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ if [ "${confsyncstatus_results}" != "Up" ]; then
+ echo "`date` INFO: ${APP}-Config-sync state is not yet up."
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ fi
+ fi
+ fi
+ # Record current version in LASTVERSION_FILE
+ echo $(get_label "controller-revision-hash") > ${LASTVERSION_FILE}
+ # For monitor node just check for 3 online nodes in group; active label will never be set
+ if [ "${node_ordinal}" = "2" ]; then
+ role_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -c "/rpc-reply/rpc/show/redundancy/group-node/status[text() = \"Online\"]"`
+ if [[ ${role_results} != *"<returnInfo>"* ]]; then
+ errorinfo=`echo ${results} | xmllint -xpath "string(returnInfo/errorInfo)" - 2>/dev/null` || errorinfo=
+ echo "`date` INFO: ${APP}-Waiting for valid server status response, got ${errorinfo}"
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ fi
+ nodes_online=`echo ${role_results} | xmllint -xpath "string(returnInfo/countSearchResult)" -`
+ if [ "$nodes_online" -eq "3" ]; then
+ if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
+ echo "`date` INFO: ${APP}-All nodes online, monitor node is redundancy ready"
+ touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
+ fi
+ exit 0
+ else
+ echo "`date` INFO: ${APP}-Monitor node is not redundancy ready, ${nodes_online} of 3 nodes online"
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ fi
+ fi # End Monitor Node
+ # For Primary or Backup nodes set both service readiness (active label) and k8s readiness (exit return value)
+ health_result=`curl -s -o /dev/null -w "%{http_code}" http://localhost:5550/health-check/guaranteed-active`
+ case "${health_result}" in
+ "200")
+ if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
+ echo "`date` INFO: ${APP}-HA Event Broker health check reported 200, message spool is up"
+ touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
+ fi
+ set_label "active" "true"
+ exit 0
+ ;;
+ "503")
+ if [[ $(get_label "active") = "true" ]]; then echo "`date` INFO: ${APP}-HA Event Broker health check reported 503"; fi
+ set_label "active" "false"
+ # Further check is required to determine readiness
+ ;;
+ *)
+ echo "`date` WARN: ${APP}-HA Event Broker health check reported unexpected ${health_result}"
+ set_label "active" "false"
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ esac
+ # At this point analyzing readiness after health check returned 503 - checking if Event Broker is Standby
+ case "${node_ordinal}" in
+ "0")
+ config_role="primary"
+ ;;
+ "1")
+ config_role="backup"
+ ;;
+ esac
+ online_results=`/mnt/disks/solace/semp_query.sh -n admin -p ${password} -u http://localhost:8080 \
+ -q "" \
+ -v "/rpc-reply/rpc/show/redundancy/virtual-routers/${config_role}/status/activity[text()]"`
+ local_activity=`echo ${online_results} | xmllint -xpath "string(returnInfo/valueSearchResult)" -`
+ case "${local_activity}" in
+ "Mate Active")
+ # Pass readiness check
+ if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
+ echo "`date` INFO: ${APP}-Redundancy is up and node is mate Active"
+ touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
+ fi
+ exit 0
+ ;;
+ *)
+ echo "`date` WARN: ${APP}-Health check returned 503 and local activity state is: ${local_activity}, failing readiness check."
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ ;;
+ esac
+{{- else }}
+ # nonHA config
+ health_result=`curl -s -o /dev/null -w "%{http_code}" http://localhost:5550/health-check/guaranteed-active`
+ case "${health_result}" in
+ "200")
+ if [ ! -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE} ]; then
+ echo "`date` INFO: ${APP}-nonHA Event Broker health check reported 200, message spool is up"
+ touch ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}
+ fi
+ set_label "active" "true"
+ exit 0
+ ;;
+ "503")
+ if [[ $(get_label "active") = "true" ]]; then echo "`date` INFO: ${APP}-nonHA Event Broker health check reported 503, message spool is down"; fi
+ set_label "active" "false"
+ # Fail readiness check
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ ;;
+ *)
+ echo "`date` WARN: ${APP}-nonHA Event Broker health check reported ${health_result}"
+ set_label "active" "false"
+ # Fail readiness check
+ rm -f ${FINAL_ACTIVITY_LOGGED_TRACKING_FILE}; exit 1
+ esac
+{{- end }}
+ semp_query.sh: |-
+ #!/bin/bash
+ APP=`basename "$0"`
+ OPTIND=1 # Reset in case getopts has been used previously in the shell.
+ # Initialize our own variables:
+ count_search=""
+ name=""
+ password=""
+ query=""
+ url=""
+ value_search=""
+ test_connection_only=false
+ script_name=$0
+ verbose=0
+ while getopts "c:n:p:q:u:v:t" opt; do
+ case "$opt" in
+ c) count_search=$OPTARG
+ ;;
+ n) username=$OPTARG
+ ;;
+ p) password=$OPTARG
+ ;;
+ q) query=$OPTARG
+ ;;
+ u) url=$OPTARG
+ ;;
+ v) value_search=$OPTARG
+ ;;
+ t) test_connection_only=true
+ ;;
+ esac
+ done
+ shift $((OPTIND-1))
+ [ "$1" = "--" ] && shift
+ verbose=1
+ #echo "`date` INFO: ${APP}-${script_name}: count_search=${count_search} ,username=${username} ,password=xxx query=${query} \
+ # ,url=${url} ,value_search=${value_search} ,Leftovers: $@" >&2
+ if [[ ${url} = "" || ${username} = "" || ${password} = "" ]]; then
+ echo "`date` ERROR: ${APP}-${script_name}: url, username, password are mandatory fields" >&2
+ echo 'missing parameter'
+ exit 1
+ fi
+ if [ "`curl --write-out '%{http_code}' --silent --output /dev/null -u ${username}:${password} ${url}/SEMP`" != "200" ] ; then
+ echo "management host is not responding"
+ exit 1
+ fi
+ if [ "$test_connection_only" = true ] ; then
+ exit 0 # done here, connection is up
+ fi
+ query_response=`curl -sS -u ${username}:${password} ${url}/SEMP -d "${query}"`
+ # Validate first char of response is "<", otherwise no hope of being valid xml
+ if [[ ${query_response:0:1} != "<" ]] ; then
+ echo "no valid xml returned"
+ exit 1
+ fi
+ query_response_code=`echo $query_response | xmllint -xpath 'string(/rpc-reply/execute-result/@code)' -`
+
+ if [[ -z ${query_response_code} && ${query_response_code} != "ok" ]]; then
+ echo "query failed -${query_response_code}-"
+ exit 1
+ fi
+ #echo "`date` INFO: ${APP}-${script_name}: query passed ${query_response_code}" >&2
+ if [[ ! -z $value_search ]]; then
+ value_result=`echo $query_response | xmllint -xpath "string($value_search)" -`
+ echo "${value_result}"
+ exit 0
+ fi
+ if [[ ! -z $count_search ]]; then
+ count_line=`echo $query_response | xmllint -xpath "$count_search" -`
+ count_string=`echo $count_search | cut -d '"' -f 2`
+ count_result=`echo ${count_line} | tr "><" "\n" | grep -c ${count_string}`
+ echo "${count_result}"
+ exit 0
+ fi
diff --git a/pubsubplus/templates/solacePVCForMonitor.yaml b/pubsubplus/templates/solacePVCForMonitor.yaml
new file mode 100644
index 00000000..fe9b375e
--- /dev/null
+++ b/pubsubplus/templates/solacePVCForMonitor.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.solace.redundancy .Values.storage.monitorStorageSize }}
+# This is a pre-install hook to create a minimum-size PVC for Monitor pods in an HA deployment
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: data-{{ template "solace.fullname" . }}-2
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ annotations:
+ "helm.sh/hook": pre-install
+spec:
+ {{- if .Values.storage.useStorageClass }}
+ storageClassName: {{ .Values.storage.useStorageClass }}
+ {{- end}}
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.storage.monitorStorageSize }}
+{{- end }}
\ No newline at end of file
diff --git a/pubsubplus/templates/solaceStatefulSet.yaml b/pubsubplus/templates/solaceStatefulSet.yaml
index 73b28f32..bf5a316b 100644
--- a/pubsubplus/templates/solaceStatefulSet.yaml
+++ b/pubsubplus/templates/solaceStatefulSet.yaml
@@ -1,246 +1,271 @@
-# Create the StatefulSet needed for redundancy
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: {{ template "solace.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
-spec:
- selector:
- matchLabels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- serviceName: {{ template "solace.fullname" . }}-discovery
- replicas: {{- if .Values.solace.redundancy }} 3 {{- else }} 1 {{- end }}
- podManagementPolicy: Parallel
- updateStrategy:
- type: RollingUpdate
- template:
- metadata:
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- spec:
-{{- if .Values.image.pullSecretName }}
- imagePullSecrets:
- - name: {{ .Values.image.pullSecretName}}
-{{- end}}
-{{- if .Values.securityContext.enabled }}
- securityContext:
- fsGroup: {{ .Values.securityContext.fsGroup | int64 }}
- {{- if and (.Values.securityContext.runAsUser) }}
- runAsUser: {{ .Values.securityContext.runAsUser | int64 }}
- {{- end }}
-{{- end }}
- serviceAccountName: {{ template "solace.serviceAccountName" . }}
- terminationGracePeriodSeconds: 1200
- containers:
- - name: {{ .Chart.Name }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
- resources:
- requests:
-{{- if eq .Values.solace.size "dev" }}
- cpu: "1"
- memory: 3410Mi
-{{- else if eq .Values.solace.size "prod100" }}
- cpu: "2"
- memory: 4025Mi
-{{- else if eq .Values.solace.size "prod1k" }}
- cpu: "2"
- memory: 6515Mi
-{{- else if eq .Values.solace.size "prod10k" }}
- cpu: "4"
- memory: 12435Mi
-{{- else if eq .Values.solace.size "prod100k" }}
- cpu: "8"
- memory: 30925Mi
-{{- else if eq .Values.solace.size "prod200k" }}
- cpu: "12"
- memory: 52581Mi
-{{- end }}
- limits:
-{{- if eq .Values.solace.size "dev" }}
- cpu: "2"
- memory: 3410Mi
-{{- else if eq .Values.solace.size "prod100" }}
- cpu: "2"
- memory: 4025Mi
-{{- else if eq .Values.solace.size "prod1k" }}
- cpu: "2"
- memory: 6515Mi
-{{- else if eq .Values.solace.size "prod10k" }}
- cpu: "4"
- memory: 12435Mi
-{{- else if eq .Values.solace.size "prod100k" }}
- cpu: "8"
- memory: 30925Mi
-{{- else if eq .Values.solace.size "prod200k" }}
- cpu: "12"
- memory: 52581Mi
-{{- end }}
- livenessProbe:
- tcpSocket:
- port: 8080
- initialDelaySeconds: 300
- timeoutSeconds: 5
- readinessProbe:
- initialDelaySeconds: 30
- periodSeconds: 5
- exec:
- command:
- - /mnt/disks/solace/readiness_check.sh
- securityContext:
- privileged: false
-{{- if semverCompare "<9.4" (default "9.4" (regexFind "\\d+\\.\\d+" .Values.image.tag)) }}
- # need capabilities opened up if PubSub+ version is lower than 9.4
- capabilities:
- add:
- - IPC_LOCK
- - SYS_NICE
-{{- end }}
- env:
- - name: STATEFULSET_NAME
- value: {{ template "solace.fullname" . }}
- - name: STATEFULSET_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: TZ
- value: {{ printf "%s/%s" ":/usr/share/zoneinfo" (default "UTC" .Values.solace.timezone) }}
- - name: UMASK
- value: "0022"
-{{- if .Values.solace.extraEnvVars }}
- {{- range $item := .Values.solace.extraEnvVars }}
- - name: {{ $item.name }}
- value: {{ $item.value | quote }}
- {{- end }}
-{{- end }}
-{{- if or .Values.solace.extraEnvVarsCM .Values.solace.extraEnvVarsSecret }}
- envFrom:
- {{- if .Values.solace.extraEnvVarsCM }}
- - configMapRef:
- name: {{ tpl .Values.solace.extraEnvVarsCM . | quote }}
- {{- end }}
- {{- if .Values.solace.extraEnvVarsSecret }}
- - secretRef:
- name: {{ tpl .Values.solace.extraEnvVarsSecret . | quote }}
- {{- end }}
-{{- end }}
- command:
- - bash
- - "-ec"
- - |
- source /mnt/disks/solace/init.sh
- # not using postinstall hooks because of order dependencies
- # launch config check - readiness check script will be launched by readinessProbe
- nohup /mnt/disks/solace/startup-broker.sh &
- /usr/sbin/boot.sh
- lifecycle:
- preStop:
- exec:
- command:
- - bash
- - "-ec"
- - |
- while ! pgrep solacedaemon ; do sleep 1; done
- killall solacedaemon;
- while [ ! -d /usr/sw/var/db.upgrade ]; do sleep 1; done;
- volumeMounts:
- - name: podinfo
- mountPath: /etc/podinfo
- - name: config-map
- mountPath: /mnt/disks/solace
- - name: secrets
- mountPath: /mnt/disks/secrets
- readOnly: true
-{{- if and (.Values.tls) (.Values.tls.enabled) }}
- - name: server-certs
- mountPath: /mnt/disks/certs/server
- readOnly: true
-{{- end }}
- - name: dshm
- mountPath: /dev/shm
- - name: data
- mountPath: /usr/sw/jail
- subPath: jail
- - name: data
- mountPath: /usr/sw/var
- subPath: var
- - name: data
- mountPath: /usr/sw/internalSpool
- subPath: internalSpool
- - name: data
- mountPath: /usr/sw/adb
- subPath: adb
- - name: data
- mountPath: /var/lib/solace/diags
- subPath: diags
-{{- if (not .Values.storage.slow) }}
- # only mount when not using slow storage
- - name: data
- mountPath: /usr/sw/internalSpool/softAdb
- subPath: softAdb
-{{- else }}
- - name: soft-adb-ephemeral
- mountPath: /usr/sw/internalSpool/softAdb
-{{- end }}
- ports:
- {{- range $item := .Values.service.ports }}
- - containerPort: {{ $item.containerPort }}
- protocol: {{ $item.protocol }}
- {{- end}}
- volumes:
- - name: podinfo
- downwardAPI:
- items:
- - path: "labels"
- fieldRef:
- fieldPath: metadata.labels
- - name: config-map
- configMap:
- name: {{ template "solace.fullname" . }}
- defaultMode: 0755
- - name: secrets
- secret:
- secretName: {{ template "solace.fullname" . }}-secrets
- defaultMode: 0400
-{{- if and (.Values.tls) (.Values.tls.enabled) }}
- - name: server-certs
- secret:
- secretName: {{ required "A secret containing the server key and certificates is required when TLS in enabled" .Values.tls.serverCertificatesSecret }}
- defaultMode: 0400
-{{- end }}
- - name: dshm
- emptyDir:
- medium: Memory
-{{- if .Values.storage.slow }}
- - name: soft-adb-ephemeral
- emptyDir: {}
-{{- end }}
-{{- if and (.Values.storage) (not .Values.storage.persistent) }}
- - name: data
- emptyDir: {}
-{{- else if and (.Values.storage) (.Values.storage.customVolumeMount) }}
- - name: data
- {{- with .Values.storage.customVolumeMount }}
-{{ tpl . $ | indent 10 }}
- {{- end }}
-{{- else }}
-
- # This is the default way to acquire volume for the data mount
- volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- {{- if .Values.storage.useStorageClass }}
- storageClassName: {{ .Values.storage.useStorageClass }}
- {{- end}}
- accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: {{ .Values.storage.size}}
-{{- end }}
+# Create the StatefulSet needed for redundancy
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ template "solace.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ serviceName: {{ template "solace.fullname" . }}-discovery
+ replicas: {{- if .Values.solace.redundancy }} 3 {{- else }} 1 {{- end }}
+ podManagementPolicy: Parallel
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Values.solace.podModifierEnabled }}
+ annotations:
+ pod-modifier.solace.com/modify: "true"
+ pod-modifier.solace.com/modify.podDefinition: |
+ {"Pods":[{"metadata":{"name":"{{ template "solace.fullname" . }}-2"},"spec":{"containers": [{"name": "pubsubplus","resources": {"requests": {"cpu": "1","memory": "1965Mi"},"limits": {"cpu": "1","memory": "1965Mi"} }} ] } } ]}
+{{- end}}
+ spec:
+{{- if .Values.image.pullSecretName }}
+ imagePullSecrets:
+ - name: {{ .Values.image.pullSecretName}}
+{{- end}}
+{{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup | int64 }}
+ {{- if and (.Values.securityContext.runAsUser) }}
+ runAsUser: {{ .Values.securityContext.runAsUser | int64 }}
+ {{- end }}
+{{- end }}
+ serviceAccountName: {{ template "solace.serviceAccountName" . }}
+ terminationGracePeriodSeconds: 1200
+ containers:
+ - name: pubsubplus
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
+ resources:
+ requests:
+{{- if .Values.solace.systemScaling }}
+ cpu: {{ required "A valid cpu parameter required!" .Values.solace.systemScaling.cpu | quote }}
+ memory: {{ required "A valid memory parameter required!" .Values.solace.systemScaling.memory }}
+{{- else if eq .Values.solace.size "dev" }}
+ cpu: "1"
+ memory: 3410Mi
+{{- else if eq .Values.solace.size "prod100" }}
+ cpu: "2"
+ memory: 4025Mi
+{{- else if eq .Values.solace.size "prod1k" }}
+ cpu: "2"
+ memory: 6515Mi
+{{- else if eq .Values.solace.size "prod10k" }}
+ cpu: "4"
+ memory: 12435Mi
+{{- else if eq .Values.solace.size "prod100k" }}
+ cpu: "8"
+ memory: 30925Mi
+{{- else if eq .Values.solace.size "prod200k" }}
+ cpu: "12"
+ memory: 52581Mi
+{{- else }}
+ {{- fail "Invalid solace.size" }}
+{{- end }}
+ limits:
+{{- if .Values.solace.systemScaling }}
+ cpu: {{ .Values.solace.systemScaling.cpu | quote }}
+ memory: {{ .Values.solace.systemScaling.memory }}
+{{- else if eq .Values.solace.size "dev" }}
+ cpu: "2"
+ memory: 3410Mi
+{{- else if eq .Values.solace.size "prod100" }}
+ cpu: "2"
+ memory: 4025Mi
+{{- else if eq .Values.solace.size "prod1k" }}
+ cpu: "2"
+ memory: 6515Mi
+{{- else if eq .Values.solace.size "prod10k" }}
+ cpu: "4"
+ memory: 12435Mi
+{{- else if eq .Values.solace.size "prod100k" }}
+ cpu: "8"
+ memory: 30925Mi
+{{- else if eq .Values.solace.size "prod200k" }}
+ cpu: "12"
+ memory: 52581Mi
+{{- end }}
+ livenessProbe:
+ tcpSocket:
+ port: 8080
+ initialDelaySeconds: 300
+ timeoutSeconds: 5
+ readinessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ exec:
+ command:
+ - /mnt/disks/solace/readiness_check.sh
+ securityContext:
+ privileged: false
+{{- if semverCompare "<9.4" (default "9.4" (regexFind "\\d+\\.\\d+" .Values.image.tag)) }}
+ # need capabilities opened up if PubSub+ version is lower than 9.4
+ capabilities:
+ add:
+ - IPC_LOCK
+ - SYS_NICE
+{{- end }}
+ env:
+ - name: STATEFULSET_NAME
+ value: {{ template "solace.fullname" . }}
+ - name: STATEFULSET_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: TZ
+ value: {{ printf "%s/%s" ":/usr/share/zoneinfo" (default "UTC" .Values.solace.timezone) }}
+ - name: UMASK
+ value: "0022"
+{{- if .Values.solace.extraEnvVars }}
+ {{- range $item := .Values.solace.extraEnvVars }}
+ - name: {{ $item.name }}
+ value: {{ $item.value | quote }}
+ {{- end }}
+{{- end }}
+{{- if or .Values.solace.extraEnvVarsCM .Values.solace.extraEnvVarsSecret }}
+ envFrom:
+ {{- if .Values.solace.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ tpl .Values.solace.extraEnvVarsCM . | quote }}
+ {{- end }}
+ {{- if .Values.solace.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ tpl .Values.solace.extraEnvVarsSecret . | quote }}
+ {{- end }}
+{{- end }}
+ command:
+ - bash
+ - "-ec"
+ - |
+ source /mnt/disks/solace/init.sh
+ # not using postinstall hooks because of order dependencies
+ # launch config check - readiness check script will be launched by readinessProbe
+ nohup /mnt/disks/solace/startup-broker.sh &
+ /usr/sbin/boot.sh
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - bash
+ - "-ec"
+ - |
+ while ! pgrep solacedaemon ; do sleep 1; done
+ killall solacedaemon;
+ while [ ! -d /usr/sw/var/db.upgrade ]; do sleep 1; done;
+ ports:
+ {{- range $item := .Values.service.ports }}
+ - containerPort: {{ $item.containerPort }}
+ protocol: {{ $item.protocol }}
+ {{- end}}
+ volumeMounts:
+ - name: podinfo
+ mountPath: /etc/podinfo
+ - name: config-map
+ mountPath: /mnt/disks/solace
+ - name: secrets
+ mountPath: /mnt/disks/secrets
+ readOnly: true
+{{- if and (.Values.tls) (.Values.tls.enabled) }}
+ - name: server-certs
+ mountPath: /mnt/disks/certs/server
+ readOnly: true
+{{- end }}
+ - name: dshm
+ mountPath: /dev/shm
+{{- if and (.Values.storage) (.Values.storage.useStorageGroup) }}
+ # use single mount point storage-group
+ - name: data
+ mountPath: /var/lib/solace
+ {{- if ( .Values.storage.slow) }}
+ # mount ephemeral when using slow storage
+ - name: soft-adb-ephemeral
+ mountPath: /var/lib/solace/spool-cache
+ {{- end }}
+{{- else }}
+ # use legacy multiple storage elements
+ - name: data
+ mountPath: /usr/sw/jail
+ subPath: jail
+ - name: data
+ mountPath: /usr/sw/var
+ subPath: var
+ - name: data
+ mountPath: /usr/sw/internalSpool
+ subPath: internalSpool
+ - name: data
+ mountPath: /usr/sw/adb
+ subPath: adb
+ - name: data
+ mountPath: /var/lib/solace/diags
+ subPath: diags
+ {{- if (not .Values.storage.slow) }}
+ - name: data
+ mountPath: /usr/sw/internalSpool/softAdb
+ subPath: softAdb
+ {{- else }}
+ # mount ephemeral when using slow storage
+ - name: soft-adb-ephemeral
+ mountPath: /usr/sw/internalSpool/softAdb
+ {{- end }}
+{{- end }}
+ volumes:
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: "labels"
+ fieldRef:
+ fieldPath: metadata.labels
+ - name: config-map
+ configMap:
+ name: {{ template "solace.fullname" . }}
+ defaultMode: 0755
+ - name: secrets
+ secret:
+ secretName: {{ template "solace.fullname" . }}-secrets
+ defaultMode: 0400
+{{- if and (.Values.tls) (.Values.tls.enabled) }}
+ - name: server-certs
+ secret:
+          secretName: {{ required "A secret containing the server key and certificates is required when TLS is enabled" .Values.tls.serverCertificatesSecret }}
+ defaultMode: 0400
+{{- end }}
+ - name: dshm
+ emptyDir:
+ medium: Memory
+{{- if .Values.storage.slow }}
+ - name: soft-adb-ephemeral
+ emptyDir: {}
+{{- end }}
+{{- if and (.Values.storage) (not .Values.storage.persistent) }}
+ - name: data
+ emptyDir: {}
+{{- else if and (.Values.storage) (.Values.storage.customVolumeMount) }}
+ - name: data
+ {{- with .Values.storage.customVolumeMount }}
+{{ tpl . $ | indent 10 }}
+ {{- end }}
+{{- else }}
+ # This is the default way to acquire volume for the data mount
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ {{- if .Values.storage.useStorageClass }}
+ storageClassName: {{ .Values.storage.useStorageClass }}
+ {{- end}}
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: {{ .Values.storage.size}}
+{{- end }}
diff --git a/pubsubplus/templates/tests/test-semp-connection.yaml b/pubsubplus/templates/tests/test-semp-connection.yaml
index 22bb9521..576dcb89 100644
--- a/pubsubplus/templates/tests/test-semp-connection.yaml
+++ b/pubsubplus/templates/tests/test-semp-connection.yaml
@@ -1,52 +1,52 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: "{{ include "solace.fullname" . }}-test"
- labels:
- app.kubernetes.io/name: {{ template "solace.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
- annotations:
- "helm.sh/hook": test
-spec:
-{{- if .Values.image.pullSecretName }}
- imagePullSecrets:
- - name: {{ .Values.image.pullSecretName}}
-{{- end}}
-{{- if .Values.securityContext.enabled }}
- securityContext:
- fsGroup: {{ .Values.securityContext.fsGroup | int64 }}
- {{- if and (.Values.securityContext.runAsUser) }}
- runAsUser: {{ .Values.securityContext.runAsUser | int64 }}
- {{- end }}
-{{- end }}
- containers:
- - name: {{ template "solace.fullname" . }}-test
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
- env:
- - name: SOLACE_HOST
- value: {{ template "solace.fullname" . }}
- - name: SOLACE_PASSWORD
- valueFrom:
- secretKeyRef:
- name: {{ template "solace.fullname" . }}-secrets
- key: username_admin_password
- - name: PORT_MAPPINGS
- value: "{{ .Values.service.ports | toString }}"
- command:
- - /bin/bash
- - -c
- - |
- # Get tcp-semp port out of PORT_MAPPINGS
- portmappings_array=(`awk -F']' '{ for(i=1;i<=NF;i++) print $i }' <<< $PORT_MAPPINGS | grep "tcp-semp"`)
- for i in ${portmappings_array[@]}; do if [[ "$i" == *"servicePort"* ]]; then SEMP_PORT="$(cut -d':' -f2 <<<$i)"; fi ; done
- echo "SEMP port: $SEMP_PORT"
- echo "Checking for successful SEMP access"
- if curl --write-out '%{http_code}' -u admin:$SOLACE_PASSWORD $SOLACE_HOST:$SEMP_PORT/SEMP | grep 200
- then echo "SEMP access successful"
- else echo "SEMP access failed"; exit 1
- fi
- exit 0
- restartPolicy: Never
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "solace.fullname" . }}-test"
+ labels:
+ app.kubernetes.io/name: {{ template "solace.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+{{- if .Values.image.pullSecretName }}
+ imagePullSecrets:
+ - name: {{ .Values.image.pullSecretName}}
+{{- end}}
+{{- if .Values.securityContext.enabled }}
+ securityContext:
+ fsGroup: {{ .Values.securityContext.fsGroup | int64 }}
+ {{- if and (.Values.securityContext.runAsUser) }}
+ runAsUser: {{ .Values.securityContext.runAsUser | int64 }}
+ {{- end }}
+{{- end }}
+ containers:
+ - name: {{ template "solace.fullname" . }}-test
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
+ env:
+ - name: SOLACE_HOST
+ value: {{ template "solace.fullname" . }}
+ - name: SOLACE_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "solace.fullname" . }}-secrets
+ key: username_admin_password
+ - name: PORT_MAPPINGS
+ value: "{{ .Values.service.ports | toString }}"
+ command:
+ - /bin/bash
+ - -c
+ - |
+ # Get tcp-semp port out of PORT_MAPPINGS
+ portmappings_array=(`awk -F']' '{ for(i=1;i<=NF;i++) print $i }' <<< $PORT_MAPPINGS | grep "tcp-semp"`)
+ for i in ${portmappings_array[@]}; do if [[ "$i" == *"servicePort"* ]]; then SEMP_PORT="$(cut -d':' -f2 <<<$i)"; fi ; done
+ echo "SEMP port: $SEMP_PORT"
+ echo "Checking for successful SEMP access"
+ if curl --write-out '%{http_code}' -u admin:$SOLACE_PASSWORD $SOLACE_HOST:$SEMP_PORT/SEMP | grep 200
+ then echo "SEMP access successful"
+ else echo "SEMP access failed"; exit 1
+ fi
+ exit 0
+ restartPolicy: Never
diff --git a/pubsubplus/values.openshift.yaml b/pubsubplus/values.openshift.yaml
index 923594c7..1f69587c 100644
--- a/pubsubplus/values.openshift.yaml
+++ b/pubsubplus/values.openshift.yaml
@@ -1,15 +1,15 @@
-# These overrides are appropriate defaults for deploying this chart on OpenShift
-
-# This will reference a RedHat certified image when available
-# image:
- # Default repository is the public Solace DockerHub repo
- repository: registry.connect.redhat.com/solace/pubsubplus-standard
- # Recommending to use a specific version tag for production
- tag: latest
-
-securityContext:
- # securityContext.enabled=true sets the pod security context
- # to defined securityContext.fsGroup (required) and runAsUser (optional)
- # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- # OpenShift takes care of setting the pod security context (except for the "default" project, which use is not recommended)
+# These overrides are appropriate defaults for deploying this chart on OpenShift
+
+# This will reference a RedHat certified image when available
+image:
+ # Default repository is the public Solace DockerHub repo
+ repository: registry.connect.redhat.com/solace/pubsubplus-standard
+ # Recommending to use a specific version tag for production
+ tag: latest
+
+securityContext:
+ # securityContext.enabled=true sets the pod security context
+ # to defined securityContext.fsGroup (required) and runAsUser (optional)
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ # OpenShift takes care of setting the pod security context (except for the "default" project, which use is not recommended)
enabled: false
\ No newline at end of file
diff --git a/pubsubplus/values.schema.json b/pubsubplus/values.schema.json
index fde7abd6..89834eb7 100644
--- a/pubsubplus/values.schema.json
+++ b/pubsubplus/values.schema.json
@@ -1,156 +1,162 @@
-{
- "$schema": "http://json-schema.org/schema#",
- "type": "object",
- "properties": {
- "fullnameOverride": {
- "type": "string"
- },
- "image": {
- "type": "object",
- "properties": {
- "pullPolicy": {
- "type": "string"
- },
- "pullSecretName": {
- "type": "string"
- },
- "repository": {
- "type": "string"
- },
- "tag": {
- "type": "string"
- }
- }
- },
- "nameOverride": {
- "type": "string"
- },
- "securityContext": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean"
- },
- "fsGroup": {
- "type": "integer"
- },
- "runAsUser": {
- "type": "integer"
- }
- }
- },
- "service": {
- "type": "object",
- "properties": {
- "annotations": {
- "type": "string"
- },
- "ports": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "containerPort": {
- "type": "integer"
- },
- "name": {
- "type": "string"
- },
- "protocol": {
- "type": "string"
- },
- "servicePort": {
- "type": "integer"
- }
- }
- }
- },
- "type": {
- "type": "string"
- }
- }
- },
- "serviceAccount": {
- "type": "object",
- "properties": {
- "create": {
- "type": "boolean"
- }
- }
- },
- "solace": {
- "type": "object",
- "properties": {
- "extraEnvVars": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "value": {
- "type": "string"
- }
- }
- }
- },
- "extraEnvVarsCM": {
- "type": "string"
- },
- "extraEnvVarsSecret": {
- "type": "string"
- },
- "redundancy": {
- "type": "boolean"
- },
- "size": {
- "type": "string"
- },
- "timezone": {
- "type": "string"
- },
- "usernameAdminPassword": {
- "type": ["string", "null"]
- }
- }
- },
- "storage": {
- "type": "object",
- "properties": {
- "customVolumeMount": {
- "type": "string"
- },
- "persistent": {
- "type": "boolean"
- },
- "size": {
- "type": "string"
- },
- "slow": {
- "type": "boolean"
- },
- "useStorageClass": {
- "type": "string"
- }
- }
- },
- "tls": {
- "type": "object",
- "properties": {
- "certFilename": {
- "type": "string"
- },
- "certKeyFilename": {
- "type": "string"
- },
- "enabled": {
- "type": "boolean"
- },
- "serverCertificatesSecret": {
- "type": "string"
- }
- }
- }
- }
-}
+{
+ "$schema": "http://json-schema.org/schema#",
+ "type": "object",
+ "properties": {
+ "fullnameOverride": {
+ "type": "string"
+ },
+ "image": {
+ "type": "object",
+ "properties": {
+ "pullPolicy": {
+ "type": "string"
+ },
+ "pullSecretName": {
+ "type": "string"
+ },
+ "repository": {
+ "type": "string"
+ },
+ "tag": {
+ "type": "string"
+ }
+ }
+ },
+ "nameOverride": {
+ "type": "string"
+ },
+ "securityContext": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean"
+ },
+ "fsGroup": {
+ "type": "integer"
+ },
+ "runAsUser": {
+ "type": "integer"
+ }
+ }
+ },
+ "service": {
+ "type": "object",
+ "properties": {
+ "annotations": {
+ "type": "object"
+ },
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "containerPort": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "protocol": {
+ "type": "string"
+ },
+ "servicePort": {
+ "type": "integer"
+ }
+ }
+ }
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "serviceAccount": {
+ "type": "object",
+ "properties": {
+ "create": {
+ "type": "boolean"
+ }
+ }
+ },
+ "solace": {
+ "type": "object",
+ "properties": {
+ "extraEnvVars": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "extraEnvVarsCM": {
+ "type": "string"
+ },
+ "extraEnvVarsSecret": {
+ "type": "string"
+ },
+ "redundancy": {
+ "type": "boolean"
+ },
+ "size": {
+ "type": "string"
+ },
+ "timezone": {
+ "type": "string"
+ },
+ "usernameAdminPassword": {
+ "type": ["string", "null"]
+ }
+ }
+ },
+ "storage": {
+ "type": "object",
+ "properties": {
+ "customVolumeMount": {
+ "type": "string"
+ },
+ "persistent": {
+ "type": "boolean"
+ },
+ "size": {
+ "type": "string"
+ },
+ "monitorStorageSize": {
+ "type": "string"
+ },
+ "slow": {
+ "type": "boolean"
+ },
+ "useStorageGroup": {
+ "type": "boolean"
+ },
+ "useStorageClass": {
+ "type": "string"
+ }
+ }
+ },
+ "tls": {
+ "type": "object",
+ "properties": {
+ "certFilename": {
+ "type": "string"
+ },
+ "certKeyFilename": {
+ "type": "string"
+ },
+ "enabled": {
+ "type": "boolean"
+ },
+ "serverCertificatesSecret": {
+ "type": "string"
+ }
+ }
+ }
+ }
+}
diff --git a/pubsubplus/values.yaml b/pubsubplus/values.yaml
index 594cf6f0..3679b4a9 100644
--- a/pubsubplus/values.yaml
+++ b/pubsubplus/values.yaml
@@ -1,185 +1,217 @@
-# Notes: ensure to keep the schema 'values.schema.json' in sync with this file
-
-# If defined Kubernetes object names will be -nameOverride
-# Default is -
-# nameOverride:
-
-# If defined "fullnameOverride" will fully override Kubernetes object names
-# fullnameOverride:
-
-solace:
- # solace.redundancy=false will create a single-node non-HA deployment;
- # true will create an HA deployment with Primary, Backup and Monitor nodes.
- redundancy: false
-
- # size defines deployment scaling tier with max # of connections, see README for valid options
- # dev uses minimum resources but performance is not guaranteed; use prod for production-ready
- size: prod100
-
- # usernameAdminPassword sets the password for the management user "admin".
- # If empty, a password will be autogenerated but note that for upgrade you need to
- # obtain the generated password and provide it for each upgrade.
- # Obtain the generated password from the deployment using
- # kubectl get secret -solace-secrets -o jsonpath="{.data.username_admin_password}" | base64 --decode
- usernameAdminPassword:
-
- # timezone setting for the PubSub+ container, if undefined default is UTC. Valid values are tz database time zone names.
- # timezone: UTC
-
- # extraEnvVars can be used to add extra environment variables to the PubSub+ container
- # Important: env variables defined here will not override the ones defined in solaceConfigMap
- # A primary use case is to specify configuration keys (Note that configuration keys are only evaluated upon initial startup)
- # refer to: https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/Docker-Tasks/Config-SW-Broker-Container-Cfg-Keys.htm
- #extraEnvVars:
- # - name: env_variable_name
- # value: "env_variable_value"
-
- # extraEnvVarsCM specifies the name of existing ConfigMap containing extra environment variables
- #extraEnvVarsCM: ""
-
- # extraEnvVarsSecret specifies the name of existing Secret containing extra environment variables (in case of sensitive data)
- #extraEnvVarsSecret: ""
-
-image:
- # Default repository
- repository: solace/solace-pubsub-standard
- # Recommending to use a specific version tag for production
- tag: latest
-
- # Specify if not using default IfNotPresent
- # pullPolicy: IfNotPresent
-
- # Provide an existing ImagePullSecret's name if using secure image repo.
- # pullSecretName:
-
-securityContext:
- # securityContext.enabled=true sets the pod security context
- # to defined securityContext.fsGroup (required) and runAsUser (optional)
- # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- enabled: true
- # fsGroup: expecting a number
- fsGroup: 1000002
- # runAsUser: expecting a number
- runAsUser: 1000001
-
-serviceAccount:
- # create=true will create a service account to be used for the deployment
- create: true
- # name if specified will be used as service account name - must exist if create=false
- # name:
-
-tls:
- # Enable TLS, default is false (not enabled)
- enabled: false
- #
- # Name of the Secret that contains the certificates - required if TLS enabled, no default
- # serverCertificatesSecret: 'example-tls-secret'
- #
- # Certificate filename, default tls.crt
- # certFilename: 'tls.crt'
- #
- # Certificate Key filename, default tls.key
- # certKeyFilename: 'tls.key'
-
-service:
- # service.type specifies how to expose the service: options include ClusterIP, NodePort, LoadBalancer (default if not specified)
- type: LoadBalancer
-
- # service.annotations allows to add provider-specific service annotations, see example below
- # annotations: 'cloud.google.com/load-balancer-type: "internal"'
-
- # List here all ports to be exposed as external service.
- # "containerPorts" (from the PubSub+ container) will be exposed as external "servicePorts",
- # Refer to the Solace documentation of Default Port Numbers for the containerPorts.
- ports:
- - servicePort: 2222
- containerPort: 2222
- protocol: TCP
- name: tcp-ssh
- - servicePort: 8080
- containerPort: 8080
- protocol: TCP
- name: tcp-semp
- - servicePort: 1943
- containerPort: 1943
- protocol: TCP
- name: tls-semp
- - servicePort: 55555
- containerPort: 55555
- protocol: TCP
- name: tcp-smf
- - servicePort: 55003
- containerPort: 55003
- protocol: TCP
- name: tcp-smfcomp
- - servicePort: 55443
- containerPort: 55443
- protocol: TCP
- name: tls-smf
- - servicePort: 55556
- containerPort: 55556
- protocol: TCP
- name: tcp-smfroute
- - servicePort: 8008
- containerPort: 8008
- protocol: TCP
- name: tcp-web
- - servicePort: 1443
- containerPort: 1443
- protocol: TCP
- name: tls-web
- - servicePort: 9000
- containerPort: 9000
- protocol: TCP
- name: tcp-rest
- - servicePort: 9443
- containerPort: 9443
- protocol: TCP
- name: tls-rest
- - servicePort: 5672
- containerPort: 5672
- protocol: TCP
- name: tcp-amqp
- - servicePort: 5671
- containerPort: 5671
- protocol: TCP
- name: tls-amqp
- - servicePort: 1883
- containerPort: 1883
- protocol: TCP
- name: tcp-mqtt
- - servicePort: 8883
- containerPort: 8883
- protocol: TCP
- name: tls-mqtt
- - servicePort: 8000
- containerPort: 8000
- protocol: TCP
- name: tcp-mqttweb
- - servicePort: 8443
- containerPort: 8443
- protocol: TCP
- name: tls-mqttweb
-
-storage:
- # storage.persistent set to false will use ephemeral storage and the rest of the storage params will be ignored
- # false is not recommended for production use
- persistent: true
-
- # storage.customVolumeMount enables specifying a YAML fragment how the data volume should be mounted.
- # If customVolumeMount is defined the rest of the storage params will be ignored
- # This example shows how to mount the PubSub+ data volume from an existing pvc "test-claim". Ensure to preserve indentation.
- # customVolumeMount: |
- # persistentVolumeClaim:
- # claimName: existing-pvc-name
-
- # storage.slow=true to indicate slow storage used, must be set for NFS. Default is false.
- # slow: true
-
- # storage.useStorageClass will be used if specified, verify it exists using `kubectl get sc`.
- # If not defined, the deployment will try to use the default storage class for the k8s cluster.
- # useStorageClass: standard
-
- # Refer to the Solace documentation for Minimum Recommended Storage-Element Size per Scaling Tier
- size: 30Gi
-
+# Notes: ensure to keep the schema 'values.schema.json' in sync with this file
+
+# If defined Kubernetes object names will be -nameOverride
+# Default is -
+# nameOverride:
+
+# If defined "fullnameOverride" will fully override Kubernetes object names
+# fullnameOverride:
+
+solace:
+ # solace.redundancy=false will create a single-node non-HA deployment;
+ # true will create an HA deployment with Primary, Backup and Monitoring nodes.
+ redundancy: false
+
+ # solace.systemScaling provides exact fine-grained specification of the event broker scaling parameters
+ # and the assigned CPU / memory resources to the Pod
+ # if provided, these settings will OVERRIDE solace.size
+ # if provided, all parameters must be set
+ # For scaling documentation look for "system scaling" at docs.solace.com
+ # Use the online calculator for "Container (messaging)" to determine CPU, Memory and Storage requirements
+ # https://docs.solace.com/Assistance-Tools/Resource-Calculator/pubsubplus-resource-calculator.html
+ # systemScaling:
+ # # maxConnections: max supported number of client connections
+ # maxConnections: 100
+ # # maxQueueMessages: number of queue messages, in millions of messages
+ # maxQueueMessages: 100
+ # # maxSpoolUsage: max Spool Usage, in MB. Also ensure adequate storage.size parameter, use the calculator
+ # maxSpoolUsage: 1500
+ # # cpu: CPUs in cores
+ # cpu: 2
+ # # memory: host Virtual Memory
+ # memory: 3410Mi
+
+ # solace.podModifierEnabled=true enables modifying (reducing) CPU and memory resources for Monitoring nodes in an HA deployment
+ # this must be provided and set to true to enable adjustments by the solace-pod-modifier admission plugin
+ # podModifierEnabled: true
+
+ # solace.size defines deployment scaling tier with max # of connections, see README for valid options
+ # dev uses minimum resources but performance is not guaranteed; use prod for production-ready
+ # solace.size is IGNORED if solace.systemScaling is provided.
+ size: prod100
+
+ # solace.usernameAdminPassword sets the password for the management user "admin".
+ # If empty, a password will be autogenerated but note that for upgrade you need to
+ # obtain the generated password and provide it for each upgrade.
+ # Obtain the generated password from the deployment using
+ # kubectl get secret -solace-secrets -o jsonpath="{.data.username_admin_password}" | base64 --decode
+ usernameAdminPassword:
+
+ # solace.timezone setting for the PubSub+ container, if undefined default is UTC. Valid values are tz database time zone names.
+ # timezone: UTC
+
+ # solace.extraEnvVars can be used to add extra environment variables to the PubSub+ container
+ # Important: env variables defined here will not override the ones defined in solaceConfigMap
+ # A primary use case is to specify configuration keys (Note that configuration keys are only evaluated upon initial startup)
+ # refer to: https://docs.solace.com/Configuring-and-Managing/SW-Broker-Specific-Config/Docker-Tasks/Config-SW-Broker-Container-Cfg-Keys.htm
+ #extraEnvVars:
+ # - name: env_variable_name
+ # value: "env_variable_value"
+
+ # solace.extraEnvVarsCM specifies the name of existing ConfigMap containing extra environment variables
+ #extraEnvVarsCM: ""
+
+ # solace.extraEnvVarsSecret specifies the name of existing Secret containing extra environment variables (in case of sensitive data)
+ #extraEnvVarsSecret: ""
+
+image:
+ # Default repository
+ repository: solace/solace-pubsub-standard
+ # Recommending to use a specific version tag for production
+ tag: latest
+
+ # Specify if not using default IfNotPresent
+ # pullPolicy: IfNotPresent
+
+ # Provide an existing ImagePullSecret's name if using secure image repo.
+ # pullSecretName:
+
+securityContext:
+ # securityContext.enabled=true sets the pod security context
+ # to defined securityContext.fsGroup (required) and runAsUser (optional)
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ enabled: true
+ # fsGroup: expecting a number
+ fsGroup: 1000002
+ # runAsUser: expecting a number
+ runAsUser: 1000001
+
+serviceAccount:
+ # create=true will create a service account to be used for the deployment
+ create: true
+ # name if specified will be used as service account name - must exist if create=false
+ # name:
+
+tls:
+ # Enable TLS, default is false (not enabled)
+ enabled: false
+ #
+ # Name of the Secret that contains the certificates - required if TLS enabled, no default
+ # serverCertificatesSecret: 'example-tls-secret'
+ #
+ # Certificate filename, default tls.crt
+ # certFilename: 'tls.crt'
+ #
+ # Certificate Key filename, default tls.key
+ # certKeyFilename: 'tls.key'
+
+service:
+ # service.type specifies how to expose the service: options include ClusterIP, NodePort, LoadBalancer (default if not specified)
+ type: LoadBalancer
+
+ # service.annotations allows to add provider-specific service annotations, see example below
+ #annotations:
+ # networking.gke.io/load-balancer-type: "internal"
+
+ # List here all ports to be exposed as external service.
+ # "containerPorts" (from the PubSub+ container) will be exposed as external "servicePorts",
+ # Refer to the Solace documentation of Default Port Numbers for the containerPorts.
+ ports:
+ - servicePort: 2222
+ containerPort: 2222
+ protocol: TCP
+ name: tcp-ssh
+ - servicePort: 8080
+ containerPort: 8080
+ protocol: TCP
+ name: tcp-semp
+ - servicePort: 1943
+ containerPort: 1943
+ protocol: TCP
+ name: tls-semp
+ - servicePort: 55555
+ containerPort: 55555
+ protocol: TCP
+ name: tcp-smf
+ - servicePort: 55003
+ containerPort: 55003
+ protocol: TCP
+ name: tcp-smfcomp
+ - servicePort: 55443
+ containerPort: 55443
+ protocol: TCP
+ name: tls-smf
+ - servicePort: 55556
+ containerPort: 55556
+ protocol: TCP
+ name: tcp-smfroute
+ - servicePort: 8008
+ containerPort: 8008
+ protocol: TCP
+ name: tcp-web
+ - servicePort: 1443
+ containerPort: 1443
+ protocol: TCP
+ name: tls-web
+ - servicePort: 9000
+ containerPort: 9000
+ protocol: TCP
+ name: tcp-rest
+ - servicePort: 9443
+ containerPort: 9443
+ protocol: TCP
+ name: tls-rest
+ - servicePort: 5672
+ containerPort: 5672
+ protocol: TCP
+ name: tcp-amqp
+ - servicePort: 5671
+ containerPort: 5671
+ protocol: TCP
+ name: tls-amqp
+ - servicePort: 1883
+ containerPort: 1883
+ protocol: TCP
+ name: tcp-mqtt
+ - servicePort: 8883
+ containerPort: 8883
+ protocol: TCP
+ name: tls-mqtt
+ - servicePort: 8000
+ containerPort: 8000
+ protocol: TCP
+ name: tcp-mqttweb
+ - servicePort: 8443
+ containerPort: 8443
+ protocol: TCP
+ name: tls-mqttweb
+
+storage:
+ # storage.persistent set to false will use ephemeral storage and the rest of the storage params will be ignored
+ # false is not recommended for production use
+ persistent: true
+
+ # storage.customVolumeMount enables specifying a YAML fragment how the data volume should be mounted.
+ # If customVolumeMount is defined the rest of the storage params will be ignored
+ # This example shows how to mount the PubSub+ data volume from an existing pvc "test-claim". Ensure to preserve indentation.
+ # customVolumeMount: |
+ # persistentVolumeClaim:
+ # claimName: existing-pvc-name
+
+ # storage.slow=true to indicate slow storage used, must be set for NFS. Default is false.
+ # slow: true
+
+ # storage.useStorageClass will be used if specified, verify it exists using `kubectl get sc`.
+ # If not defined, the deployment will try to use the default storage class for the k8s cluster.
+ # useStorageClass: standard
+
+ # storage.size: refer to the System Resource Requirements in Solace documentation for minimum storage requirements
+ size: 30Gi
+
+ # storage.monitorStorageSize: if provided this will create and assign the minimum recommended storage to Monitor pods. For initial deployments only.
+ monitorStorageSize: 1500M
+
+  # storage.useStorageGroup: use a single mount point storage-group, as recommended from PubSub+ version 9.12
+ # if undefined or set to false, the legacy behavior is to individually mount storage-elements in subdirectories.
+ # Note: legacy mount still works for broker version 9.12 and later but may be deprecated in the future.
+ # useStorageGroup: true
diff --git a/solace-pod-modifier-admission-plugin/.dockerignore b/solace-pod-modifier-admission-plugin/.dockerignore
new file mode 100644
index 00000000..4a146ece
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/.dockerignore
@@ -0,0 +1,13 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+vendor/*
diff --git a/solace-pod-modifier-admission-plugin/.gitignore b/solace-pod-modifier-admission-plugin/.gitignore
new file mode 100644
index 00000000..4b3efd39
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/.gitignore
@@ -0,0 +1,18 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+vendor/*
+
+build/_output
+
+# GOPATH
+.go
\ No newline at end of file
diff --git a/solace-pod-modifier-admission-plugin/Dockerfile b/solace-pod-modifier-admission-plugin/Dockerfile
new file mode 100644
index 00000000..0848151c
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/Dockerfile
@@ -0,0 +1,34 @@
+# Build the pod-modifier binary
+FROM golang:1.17 as builder
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY cmd/ cmd/
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o pod-modifier ./cmd
+
+
+FROM alpine:latest
+
+# install curl for prestop script
+RUN apk --no-cache add curl
+
+WORKDIR /
+
+# install binary
+COPY --from=builder /workspace/pod-modifier .
+
+# install the prestop script
+COPY ./prestop.sh .
+
+USER 65532:65532
+
+ENTRYPOINT ["/pod-modifier"]
diff --git a/solace-pod-modifier-admission-plugin/LICENSE b/solace-pod-modifier-admission-plugin/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/solace-pod-modifier-admission-plugin/Makefile b/solace-pod-modifier-admission-plugin/Makefile
new file mode 100644
index 00000000..eff617c2
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/Makefile
@@ -0,0 +1,84 @@
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# This is a requirement for 'setup-envtest.sh' in the test target.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL = /usr/bin/env bash -o pipefail
+.SHELLFLAGS = -ec
+
+# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
+ifeq (,$(shell go env GOBIN))
+GOBIN=$(shell go env GOPATH)/bin
+else
+GOBIN=$(shell go env GOBIN)
+endif
+
+# Tools for deploy
+KUBECTL?=kubectl # Use `oc` for OpenShift
+#KUBECTL?=oc
+CONTAINER_TOOL?=docker # `podman` if alternative
+#CONTAINER_TOOL?=podman
+KUSTOMIZE?=$(shell pwd)/bin/kustomize
+
+# No default for image
+IMAGE ?= $(error IMAGE not defined, please set IMAGE=)
+
+all: build
+.PHONY: all
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk commands is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# file as xyz: ## something, and then pretty-format the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Development
+
+.PHONY: fmt
+fmt: ## Run go fmt against code
+ go fmt ./...
+
+.PHONY: vet
+vet: ## Run go vet against code
+ go vet ./...
+
+##@ Build
+
+.PHONY: build
+build: fmt vet ## Build binary
+ go build -o bin/pod-modifier ./cmd/
+
+.PHONY: image-build
+image-build: build ## Build container image
+ $(CONTAINER_TOOL) build -t ${IMAGE} .
+
+.PHONY: image-push
+image-push: ## Push container image
+ $(CONTAINER_TOOL) push ${IMAGE}
+
+##@ Deployment
+
+.PHONY: deploy
+deploy: ## Deploy Solace pod-modifier mutating admission webhook including related artifacts
+ cp deploy/kustomization.yaml deploy/kustomization.yaml.tmp
+ cd deploy && $(KUSTOMIZE) edit set image pod-modifier=$(IMAGE)
+ $(KUSTOMIZE) build deploy | $(KUBECTL) apply -f -
+ mv deploy/kustomization.yaml.tmp deploy/kustomization.yaml
+
+.PHONY: undeploy
+undeploy: ## Undeploy webhook including related artifacts
+ $(KUSTOMIZE) build deploy | $(KUBECTL) delete --ignore-not-found -f -
+
+.PHONY: setup-kustomize
+setup-kustomize: ## Download and setup Kustomize tool locally if missing
+# Refer to https://kubectl.docs.kubernetes.io/installation/kustomize/binaries/ - script does not work for ARM architecture
+ (if ! test -f $(KUSTOMIZE); then cd bin && curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash; fi)
diff --git a/solace-pod-modifier-admission-plugin/README.md b/solace-pod-modifier-admission-plugin/README.md
new file mode 100644
index 00000000..f928de77
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/README.md
@@ -0,0 +1,171 @@
+# "solace-pod-modifier" Admission Plugin
+
+This project provides an [admission plugin](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) extension to Kubernetes v1.16 or later, to support reducing the resource requirements of PubSub+ Monitoring Nodes in an HA deployment.
+
+Contents:
+ * [Overview](#overview)
+ * [Security considerations](#security-considerations)
+ * [Building and Deployment](#building-and-deployment)
+ + [Project structure](#project-structure)
+ + [Tool pre-requisites](#tool-pre-requisites)
+ + [Build and deploy steps](#build-and-deploy-steps)
+ * [How to use](#how-to-use)
+ * [Troubleshooting](#troubleshooting)
+
+## Overview
+
+"solace-pod-modifier" implements a web server acting as a ["MutatingAdmissionWebhook" admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/). Support for both building and deployment is provided.
+
+When deployed it can intercept Pod create requests in enabled namespaces and can alter resource specifications of designated Pods before they are persisted.
+
+This project is a fork of https://github.com/morvencao/kube-sidecar-injector, which may be consulted for additional details.
+
+## Security considerations
+
+Admission plugins intercepting Kubernetes API requests introduce inherent security risks of making unintended modifications.
+Following safeguards are in place:
+- The webhook is deployed in a dedicated namespace, only cluster level Kubernetes admins have access to that
+- Webhook integrity: the webhook is packaged as a container image and a private repo can be used to serve it
+- The webhook code ensures:
+ * Only the "resources" of a pod can be modified, no other specs
+ * Only pods in specified namespaces can be modified - namespaces must be labelled `pod-modifier.solace.com=enabled`
+ * Only pods with annotation `pod-modifier.solace.com/modify: "true"` can be modified
+  * Only pods whose name matches the name specified in the `pod-modifier.solace.com/modify.podDefinition` annotation (`{"Pods":[{"metadata":{"name":""}...`) can be modified
+
+## Building and Deployment
+
+This project needs to be built first, which results in a container image of the webhook server in a specified repo. When deploying, a webhook pod will be created using the container image, a webhook service as an entry point for admission requests, and a `MutatingWebhookConfiguration` object, which is an Admission registration that specifies which webhook service to call and when.
+
+> Note: this release only supports one replica of the webhook pod running, more replicas will be supported in a later release.
+
+### Project structure
+
+The server is implemented as a Go language project in the `cmd` subdirectory:
+* `cmd/main.go`: entry point;
+* `cmd/webhook.go`: implements the mutating logic;
+* `cmd/cert.go`: generates a self-signed CA and a server certificate signed by it, used to serve admission requests from the Kubernetes controller; the CA bundle is provided at the Admission registration;
+* `cmd/webhookconfig`: creates an Admission registration.
+
+The Kubernetes deployment templates are provided in the `deploy` subdirectory:
+* `deploy/kustomization.yaml`: provides base configuration to be used by the Kustomize tool. "Kustomize" is a Kubernetes tool used to override template settings;
+* `deploy/deployment.yaml`: creates the webhook pod from the webhook server container image;
+* `deploy/service.yaml`: defines the webhook service pointing to the webhook pod;
+* `deploy/namespace.yaml`, `serviceaccount.yaml`, `clusterrole.yaml` and `clusterrolebinding.yaml` define a dedicated namespace and security settings for the deployment.
+
+`Dockerfile` provides the template to build the container image.
+
+`Makefile` defines the tasks related to building and deployment. Check `make help` for options.
+
+### Tool pre-requisites
+
+- [make](https://www.gnu.org/software/make/)
+- [git](https://git-scm.com/downloads)
+- [go](https://golang.org/dl/) version v1.17+
+- [docker](https://docs.docker.com/install/) version 19.03+, or [podman](https://podman.io/getting-started/installation)
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) version v1.19+
+- Access to the target Kubernetes v1.16+ cluster with the `admissionregistration.k8s.io/v1` API enabled. Verify that by the following command: `kubectl api-versions | grep admissionregistration.k8s.io
+`
+
+### Build and deploy steps
+
+1. Clone this project to your local machine
+
+```
+git clone https://github.com/SolaceProducts/pubsubplus-kubernetes-quickstart.git
+cd pubsubplus-kubernetes-quickstart/solace-pod-modifier-admission-plugin
+```
+
+2. Build and push container image:
+
+Decide in which image repo to host the built webhook container image, ensure write access to the repo, then substitute `<image-repo>`, `<image-name>` and `<tag>`:
+```bash
+make image-build image-push IMAGE=<image-repo>/<image-name>:<tag>
+```
+
+> **Important**: the image is based on `alpine:latest`. It is the builder's responsibility to keep updating the image for security patches etc. over time.
+
+3. Deploy the `solace-pod-modifier` to the Kubernetes cluster:
+
+```bash
+make deploy IMAGE=<image-repo>/<image-name>:<tag>
+```
+
+Deployment options:
+* Container image: as above, the container image is directly provided in the `make` command
+* Image pull secrets: if required, uncomment and edit in `deploy/deployment.yaml`
+* Namespace name: default is `solace-pod-modifier`, adjust in `deploy/kustomization.yaml`. *Important:* if using OpenShift, do not use the `default` namespace (project).
+
+4. Verify the webhook pod is up and running in the `solace-pod-modifier` namespace and the `MutatingWebhookConfiguration` object has been created:
+
+```bash
+kubectl get pods -n solace-pod-modifier
+NAME READY STATUS RESTARTS AGE
+pod-modifier-webhook-deployment-d45f8b7dd-968gf 1/1 Running 0 30s
+
+kubectl get MutatingWebhookConfiguration
+NAME WEBHOOKS AGE
+...
+pod-modifier.solace.com 1 36s
+```
+
+## How to use
+
+With `solace-pod-modifier` [deployed](#build-and-deploy-steps),
+
+1. Label the namespace designated for PubSub+ HA deployment with `pod-modifier.solace.com=enabled`:
+
+```
+kubectl create ns test-ns
+kubectl label namespace test-ns pod-modifier.solace.com=enabled
+# kubectl get namespace -L pod-modifier.solace.com
+NAME STATUS AGE POD-MODIFIER.SOLACE.COM
+default Active 26m
+test-ns Active 13s enabled
+kube-public Active 26m
+kube-system Active 26m
+solace-pod-modifier Active 17m
+```
+
+2. Deploy PubSub+ HA
+
+```bash
+helm install my-ha-deployment solacecharts/pubsubplus \
+ --namespace test-ns \
+ --set solace.redundancy=true,solace.podModifierEnabled=true
+```
+
+3. Verify Monitoring node (ordinal: `-2`) CPU or memory resource requirements
+
+```
+kubectl get pods -n test-ns -o yaml | grep "pod-name\:\|memory\:"
+ statefulset.kubernetes.io/pod-name: my-ha-deployment-pubsubplus-0
+ memory: 3410Mi
+ memory: 3410Mi
+ statefulset.kubernetes.io/pod-name: my-ha-deployment-pubsubplus-1
+ memory: 3410Mi
+ memory: 3410Mi
+ statefulset.kubernetes.io/pod-name: my-ha-deployment-pubsubplus-2
+ memory: 1965Mi
+ memory: 1965Mi
+```
+
+## Troubleshooting
+
+If the Monitoring node specs were not reduced, check the following:
+
+1. The webhook pod is in running state and no error in the logs:
+```
+kubectl logs pod-modifier-webhook-deployment-d45f8b7dd-968gf -n solace-pod-modifier
+```
+
+2. The namespace in which the PubSub+ HA broker is deployed has the correct label (`pod-modifier.solace.com=enabled`) as configured in `MutatingWebhookConfiguration`.
+
+3. Check if the broker pods have both annotations
+* `pod-modifier.solace.com/modify: "true"`; and also
+* `pod-modifier.solace.com/modify.podDefinition: ...`
+
+4. Error message at Helm install or upgrade
+
+Generally, if you encounter an error message about "failed calling webhook" at Helm install or upgrade, delete or roll back the Helm deployment just attempted without deleting the related PVCs. Check that the above items are all in place and then retry.
+
+
diff --git a/solace-pod-modifier-admission-plugin/bin/kustomize b/solace-pod-modifier-admission-plugin/bin/kustomize
new file mode 100755
index 00000000..a1424819
Binary files /dev/null and b/solace-pod-modifier-admission-plugin/bin/kustomize differ
diff --git a/solace-pod-modifier-admission-plugin/bin/pod-modifier b/solace-pod-modifier-admission-plugin/bin/pod-modifier
new file mode 100644
index 00000000..1b40fb9c
Binary files /dev/null and b/solace-pod-modifier-admission-plugin/bin/pod-modifier differ
diff --git a/solace-pod-modifier-admission-plugin/cmd/cert.go b/solace-pod-modifier-admission-plugin/cmd/cert.go
new file mode 100644
index 00000000..011b7d34
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/cmd/cert.go
@@ -0,0 +1,106 @@
+// solace-pod-modifier-admission-plugin
+//
+// Copyright 2021-2022 Solace Corporation. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "math/big"
+ "time"
+)
+
+// generateCert generates a self-signed CA for the given organization
+// and signs a certificate with that CA for the given common name and DNS names.
+// It returns the CA, the certificate and the private key, each in PEM format.
+func generateCert(orgs, dnsNames []string, commonName string) (*bytes.Buffer, *bytes.Buffer, *bytes.Buffer, error) {
+	// CA certificate template (self-signed root)
+	ca := &x509.Certificate{
+		SerialNumber:          big.NewInt(2022),
+		Subject:               pkix.Name{Organization: orgs},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().AddDate(1, 0, 0), // expires in 1 year
+		IsCA:                  true,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+	}
+
+	// generate private key for CA
+	caPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	// create the self-signed CA certificate (template == parent)
+	caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivateKey.PublicKey, caPrivateKey)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	// PEM-encode the CA certificate
+	caPEM := new(bytes.Buffer)
+	_ = pem.Encode(caPEM, &pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: caBytes,
+	})
+
+	// server (leaf) certificate template
+	newCert := &x509.Certificate{
+		DNSNames:     dnsNames,
+		SerialNumber: big.NewInt(1024),
+		Subject: pkix.Name{
+			CommonName:   commonName,
+			Organization: orgs,
+		},
+		NotBefore:   time.Now(),
+		NotAfter:    time.Now().AddDate(1, 0, 0), // expires in 1 year
+		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+		KeyUsage:    x509.KeyUsageDigitalSignature,
+	}
+
+	// generate new private key
+	newPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	// sign the new certificate with the CA key
+	newCertBytes, err := x509.CreateCertificate(rand.Reader, newCert, ca, &newPrivateKey.PublicKey, caPrivateKey)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	// PEM-encode the new certificate
+	newCertPEM := new(bytes.Buffer)
+	_ = pem.Encode(newCertPEM, &pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: newCertBytes,
+	})
+
+	// PEM-encode the new private key (PKCS#1)
+	newPrivateKeyPEM := new(bytes.Buffer)
+	_ = pem.Encode(newPrivateKeyPEM, &pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(newPrivateKey),
+	})
+
+	return caPEM, newCertPEM, newPrivateKeyPEM, nil
+}
diff --git a/solace-pod-modifier-admission-plugin/cmd/main.go b/solace-pod-modifier-admission-plugin/cmd/main.go
new file mode 100644
index 00000000..57923e85
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/cmd/main.go
@@ -0,0 +1,125 @@
+// solace-pod-modifier-admission-plugin
+//
+// Copyright 2021-2022 Solace Corporation. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "context"
+ "crypto/tls"
+ "flag"
+ "fmt"
+ corev1 "k8s.io/api/core/v1"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+var (
+ infoLogger *log.Logger
+ warningLogger *log.Logger
+ errorLogger *log.Logger
+)
+
+var (
+ port int
+ webhookNamespace, webhookServiceName string
+)
+
+const (
+ defaultAnnotation = "pod-modifier.solace.com/modify"
+)
+
+var (
+ annotation string
+ //requireAnnotation bool
+)
+
+type config struct {
+ Pods []corev1.Pod `json:"Pods"`
+}
+
+func init() {
+	// initialize the leveled loggers, all writing to stderr
+	infoLogger = log.New(os.Stderr, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile)
+	warningLogger = log.New(os.Stderr, "WARNING: ", log.Ldate|log.Ltime|log.Lshortfile)
+	errorLogger = log.New(os.Stderr, "ERROR: ", log.Ldate|log.Ltime|log.Lshortfile)
+
+	// namespace the webhook server runs in; POD_NAMESPACE is expected to be set in the pod spec
+	webhookNamespace = os.Getenv("POD_NAMESPACE")
+}
+
+func main() {
+	// parse command-line flags
+	flag.IntVar(&port, "port", 8443, "Webhook server port.")
+	flag.StringVar(&webhookServiceName, "service-name", "pod-modifier", "Webhook service name.")
+	// flag.StringVar(&certFile, "tlsCertFile", "/etc/webhook/certs/cert.pem", "x509 Certificate file.")
+	// flag.StringVar(&keyFile, "tlsKeyFile", "/etc/webhook/certs/key.pem", "x509 private key file.")
+	flag.StringVar(&annotation, "annotation", defaultAnnotation, "The annotation to trigger initialization")
+	flag.Parse()
+
+	dnsNames := []string{
+		webhookServiceName,
+		webhookServiceName + "." + webhookNamespace,
+		webhookServiceName + "." + webhookNamespace + ".svc",
+	}
+	commonName := webhookServiceName + "." + webhookNamespace + ".svc"
+
+	org := "solace.com"
+	caPEM, certPEM, certKeyPEM, err := generateCert([]string{org}, dnsNames, commonName)
+	if err != nil {
+		errorLogger.Fatalf("Failed to generate ca and certificate key pair: %v", err)
+	}
+
+	pair, err := tls.X509KeyPair(certPEM.Bytes(), certKeyPEM.Bytes())
+	if err != nil {
+		errorLogger.Fatalf("Failed to load certificate key pair: %v", err)
+	}
+
+	// create or update the MutatingWebhookConfiguration registration, handing it the CA bundle
+	err = createOrUpdateMutatingWebhookConfiguration(caPEM, webhookServiceName, webhookNamespace)
+	if err != nil {
+		errorLogger.Fatalf("Failed to create or update the mutating webhook configuration: %v", err)
+	}
+
+	whsvr := &WebhookServer{
+		server: &http.Server{
+			Addr:      fmt.Sprintf(":%v", port),
+			TLSConfig: &tls.Config{Certificates: []tls.Certificate{pair}},
+		},
+	}
+
+	// define http server and server handler
+	mux := http.NewServeMux()
+	mux.HandleFunc(webhookInjectPath, whsvr.serve)
+	whsvr.server.Handler = mux
+
+	// start the webhook server in a new goroutine
+	go func() {
+		if err := whsvr.server.ListenAndServeTLS("", ""); err != nil {
+			errorLogger.Fatalf("Failed to listen and serve webhook server: %v", err)
+		}
+	}()
+
+	// block until an OS shutdown signal (SIGINT/SIGTERM) is received
+	signalChan := make(chan os.Signal, 1)
+	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
+	<-signalChan
+
+	infoLogger.Printf("Got OS shutdown signal, shutting down webhook server gracefully...")
+	whsvr.server.Shutdown(context.Background())
+}
diff --git a/solace-pod-modifier-admission-plugin/cmd/webhook.go b/solace-pod-modifier-admission-plugin/cmd/webhook.go
new file mode 100644
index 00000000..de0acada
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/cmd/webhook.go
@@ -0,0 +1,264 @@
+// solace-pod-modifier-admission-plugin
+//
+// Copyright 2021-2022 Solace Corporation. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ jsonpatch "github.com/mattbaird/jsonpatch"
+ "io/ioutil"
+ admissionv1 "k8s.io/api/admission/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "net/http"
+)
+
+// Shared deserializer for decoding AdmissionReview payloads sent by the
+// API server (used by WebhookServer.serve).
+var (
+ runtimeScheme = runtime.NewScheme()
+ codecs = serializer.NewCodecFactory(runtimeScheme)
+ deserializer = codecs.UniversalDeserializer()
+)
+
+// Namespaces that are never mutated (kube-system and kube-public).
+var ignoredNamespaces = []string{
+ metav1.NamespaceSystem,
+ metav1.NamespacePublic,
+}
+
+// Annotation keys for triggering and recording injection.
+// NOTE(review): neither key appears to be referenced elsewhere in this file
+// (createPatch reads the flag-configured `annotation` variable instead) —
+// confirm they are used before relying on them.
+const (
+ admissionWebhookAnnotationInjectKey = "pod-modifier-webhook.solace.com/inject"
+ admissionWebhookAnnotationStatusKey = "pod-modifier-webhook.solace.com/status"
+)
+
+// WebhookServer wraps the HTTPS server that receives admission requests.
+type WebhookServer struct {
+ server *http.Server
+}
+
+// WhSvrParameters holds webhook server start-up parameters.
+type WhSvrParameters struct {
+ port int // webhook server port
+ certFile string // path to the x509 certificate for https
+ keyFile string // path to the x509 private key matching `CertFile`
+}
+
+// patchOperation is a single RFC 6902 JSON Patch operation.
+// NOTE(review): appears unused here — createPatch builds patches via the
+// jsonpatch library; confirm before removing.
+type patchOperation struct {
+ Op string `json:"op"`
+ Path string `json:"path"`
+ Value interface{} `json:"value,omitempty"`
+}
+
+// Check whether the target resource need to be mutated
+func mutationRequired(ignoredList []string, metadata *metav1.ObjectMeta) bool {
+ // skip special kubernetes system namespaces
+ for _, namespace := range ignoredList {
+ if metadata.Namespace == namespace {
+ infoLogger.Printf("Skip mutation for %v for it's in special namespace:%v", metadata.Name, metadata.Namespace)
+ return false
+ }
+ }
+ return true
+}
+
+// main mutation process
+func (whsvr *WebhookServer) mutate(ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
+ req := ar.Request
+ var pod corev1.Pod
+ if err := json.Unmarshal(req.Object.Raw, &pod); err != nil {
+ warningLogger.Printf("Could not unmarshal raw object: %v", err)
+ return &admissionv1.AdmissionResponse{
+ Result: &metav1.Status{
+ Message: err.Error(),
+ },
+ }
+ }
+
+ infoLogger.Printf("AdmissionReview for Kind=%v, Namespace=%v Name=%v (%v) UID=%v patchOperation=%v UserInfo=%v",
+ req.Kind, req.Namespace, req.Name, pod.Name, req.UID, req.Operation, req.UserInfo)
+
+ // determine whether to perform mutation
+ if !mutationRequired(ignoredNamespaces, &pod.ObjectMeta) {
+ infoLogger.Printf("Skipping mutation for %s/%s due to policy check", pod.Namespace, pod.Name)
+ return &admissionv1.AdmissionResponse{
+ Allowed: true,
+ }
+ }
+
+ // annotations := map[string]string{admissionWebhookAnnotationStatusKey: "injected"}
+ // patchBytes, err := createPatch(&pod, annotations)
+ patchBytes, err := createPatch(&pod)
+ if err != nil {
+ return &admissionv1.AdmissionResponse{
+ Result: &metav1.Status{
+ Message: err.Error(),
+ },
+ }
+ }
+ if bytes.Compare(patchBytes, []byte{}) == 0 {
+ infoLogger.Printf("No change required, not providing a patch in AdmissionResponse")
+ return &admissionv1.AdmissionResponse{
+ Allowed: true,
+ }
+ }
+ infoLogger.Printf("AdmissionResponse: patch=%v\n", string(patchBytes))
+ return &admissionv1.AdmissionResponse{
+ Allowed: true,
+ Patch: patchBytes,
+ PatchType: func() *admissionv1.PatchType {
+ pt := admissionv1.PatchTypeJSONPatch
+ return &pt
+ }(),
+ }
+}
+
+// createPatch builds an RFC 6902 JSON Patch for pod from the pod definition
+// JSON carried in the "<annotation>.podDefinition" annotation. It returns an
+// empty (non-nil) patch and nil error when no change applies: annotation
+// missing, no pod in the annotation matches this pod's name, or no container
+// name matches. Only container Resources are patched.
+func createPatch(pod *corev1.Pod) ([]byte, error) {
+    infoLogger.Printf("Create patch for pod: %s/%s", pod.Name, pod.Namespace)
+
+    // work on a deep copy so the original pod stays pristine for the diff
+    initializedPod := pod.DeepCopy()
+
+    a := pod.ObjectMeta.GetAnnotations()
+    podDefinitionAnnotation, ok := a[annotation+".podDefinition"]
+
+    if !ok {
+        infoLogger.Printf("Required '%s' annotation missing; skipping pod", annotation+".podDefinition")
+        return []byte{}, nil
+    }
+
+    // the annotation value is a JSON config listing target pod specs
+    var c config
+    err := json.Unmarshal([]byte(podDefinitionAnnotation), &c)
+    if err != nil {
+        errorLogger.Printf("Unmarshal failed err %v , Annotation %s", err, podDefinitionAnnotation)
+        return []byte{}, err
+    }
+
+    // find the config entry whose pod name matches this pod
+    var cpod corev1.Pod
+    found := false
+    for _, cpod = range c.Pods {
+        if pod.ObjectMeta.Name == cpod.ObjectMeta.Name {
+            found = true
+            break
+        }
+    }
+
+    if !found {
+        infoLogger.Printf("Pod name is not matching annotation - skipping this pod.")
+        return []byte{}, nil
+    }
+
+    // Modify the containers resources, if the container name of the specification matches
+    // the container name of the "initialized pod container name"
+    // Then patch the original pod
+    found = false
+    for _, configContainer := range cpod.Spec.Containers {
+        for ii, initializedContainer := range initializedPod.Spec.Containers {
+            if configContainer.Name == initializedContainer.Name {
+                initializedPod.Spec.Containers[ii].Resources = configContainer.Resources
+                found = true
+            }
+        }
+    }
+    if !found {
+        infoLogger.Printf("No container name is matching annotation - skipping this pod.")
+        return []byte{}, nil
+    }
+
+    // diff original vs modified pod into a JSON Patch.
+    // Note: errors are logged with an explicit "%v" format — passing
+    // err.Error() as the format string would misinterpret any '%' in the
+    // error text (go vet printf check).
+    oldData, err := json.Marshal(pod)
+    if err != nil {
+        errorLogger.Printf("%v", err)
+        return []byte{}, err
+    }
+
+    newData, err := json.Marshal(initializedPod)
+    if err != nil {
+        errorLogger.Printf("%v", err)
+        return []byte{}, err
+    }
+    patch, err := jsonpatch.CreatePatch(oldData, newData)
+    if err != nil {
+        errorLogger.Printf("%v", err)
+        return []byte{}, err
+    }
+
+    patchBytes, err := json.Marshal(patch)
+    if err != nil {
+        errorLogger.Printf("%v", err)
+        return []byte{}, err
+    }
+
+    return patchBytes, nil
+}
+
+// Serve method for webhook server
+func (whsvr *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
+ var body []byte
+ if r.Body != nil {
+ if data, err := ioutil.ReadAll(r.Body); err == nil {
+ body = data
+ }
+ }
+ if len(body) == 0 {
+ warningLogger.Println("empty body")
+ http.Error(w, "empty body", http.StatusBadRequest)
+ return
+ }
+
+ // verify the content type is accurate
+ contentType := r.Header.Get("Content-Type")
+ if contentType != "application/json" {
+ warningLogger.Printf("Content-Type=%s, expect application/json", contentType)
+ http.Error(w, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
+ return
+ }
+
+ var admissionResponse *admissionv1.AdmissionResponse
+ ar := admissionv1.AdmissionReview{}
+ if _, _, err := deserializer.Decode(body, nil, &ar); err != nil {
+ warningLogger.Printf("Can't decode body: %v", err)
+ admissionResponse = &admissionv1.AdmissionResponse{
+ Result: &metav1.Status{
+ Message: err.Error(),
+ },
+ }
+ } else {
+ admissionResponse = whsvr.mutate(&ar)
+ }
+
+ admissionReview := admissionv1.AdmissionReview{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "admission.k8s.io/v1",
+ Kind: "AdmissionReview",
+ },
+ }
+ if admissionResponse != nil {
+ admissionReview.Response = admissionResponse
+ if ar.Request != nil {
+ admissionReview.Response.UID = ar.Request.UID
+ }
+ }
+
+ resp, err := json.Marshal(admissionReview)
+ if err != nil {
+ warningLogger.Printf("Can't encode response: %v", err)
+ http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
+ }
+ infoLogger.Printf("Ready to write reponse ...")
+ if _, err := w.Write(resp); err != nil {
+ warningLogger.Printf("Can't write response: %v", err)
+ http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
+ }
+}
diff --git a/solace-pod-modifier-admission-plugin/cmd/webhookconfig.go b/solace-pod-modifier-admission-plugin/cmd/webhookconfig.go
new file mode 100644
index 00000000..cac9ef4a
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/cmd/webhookconfig.go
@@ -0,0 +1,124 @@
+// solace-pod-modifier-admission-plugin
+//
+// Copyright 2021-2022 Solace Corporation. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "os"
+ "reflect"
+
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/clientcmd"
+)
+
+// Name of the MutatingWebhookConfiguration object this webhook self-registers,
+// and the URL path the API server calls on the webhook service (registered as
+// the HTTP handler path in main).
+var (
+ webhookConfigName = "pod-modifier.solace.com"
+ webhookInjectPath = "/activate"
+)
+
+// createOrUpdateMutatingWebhookConfiguration registers (or refreshes) the
+// cluster-scoped MutatingWebhookConfiguration that points the API server at
+// this webhook's service. caPEM is the self-generated CA bundle the API
+// server will use to trust the webhook's serving certificate. Only pods in
+// namespaces labeled "pod-modifier.solace.com: enabled" are intercepted.
+func createOrUpdateMutatingWebhookConfiguration(caPEM *bytes.Buffer, webhookService, webhookNamespace string) error {
+    infoLogger.Println("Initializing the kube client...")
+
+    // Build client config from $KUBECONFIG; with an empty path clientcmd
+    // falls back to in-cluster configuration.
+    kubeconfig := os.Getenv("KUBECONFIG")
+    config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+    if err != nil {
+        return err
+    }
+    clientset, err := kubernetes.NewForConfig(config)
+    if err != nil {
+        return err
+    }
+    mutatingWebhookConfigV1Client := clientset.AdmissionregistrationV1()
+
+    infoLogger.Printf("Creating or updating the mutatingwebhookconfiguration: %s", webhookConfigName)
+    fail := admissionregistrationv1.Fail
+    sideEffect := admissionregistrationv1.SideEffectClassNone
+    mutatingWebhookConfig := &admissionregistrationv1.MutatingWebhookConfiguration{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: webhookConfigName,
+        },
+        Webhooks: []admissionregistrationv1.MutatingWebhook{{
+            Name: "pod-modifier.solace.com",
+            AdmissionReviewVersions: []string{"v1", "v1beta1"},
+            SideEffects: &sideEffect,
+            ClientConfig: admissionregistrationv1.WebhookClientConfig{
+                CABundle: caPEM.Bytes(), // self-generated CA for the webhook
+                Service: &admissionregistrationv1.ServiceReference{
+                    Name: webhookService,
+                    Namespace: webhookNamespace,
+                    Path: &webhookInjectPath,
+                },
+            },
+            // intercept pod create/update in the core ("") API group
+            Rules: []admissionregistrationv1.RuleWithOperations{
+                {
+                    Operations: []admissionregistrationv1.OperationType{
+                        admissionregistrationv1.Create,
+                        admissionregistrationv1.Update,
+                    },
+                    Rule: admissionregistrationv1.Rule{
+                        APIGroups: []string{""},
+                        APIVersions: []string{"v1"},
+                        Resources: []string{"pods"},
+                    },
+                },
+            },
+            NamespaceSelector: &metav1.LabelSelector{
+                MatchLabels: map[string]string{
+                    "pod-modifier.solace.com": "enabled",
+                },
+            },
+            FailurePolicy: &fail,
+        }},
+    }
+
+    foundWebhookConfig, err := mutatingWebhookConfigV1Client.MutatingWebhookConfigurations().Get(context.TODO(), webhookConfigName, metav1.GetOptions{})
+    if err != nil && apierrors.IsNotFound(err) {
+        if _, err := mutatingWebhookConfigV1Client.MutatingWebhookConfigurations().Create(context.TODO(), mutatingWebhookConfig, metav1.CreateOptions{}); err != nil {
+            warningLogger.Printf("Failed to create the mutatingwebhookconfiguration: %s", webhookConfigName)
+            return err
+        }
+        infoLogger.Printf("Created mutatingwebhookconfiguration: %s", webhookConfigName)
+    } else if err != nil {
+        warningLogger.Printf("Failed to check the mutatingwebhookconfiguration: %s", webhookConfigName)
+        return err
+    } else if len(foundWebhookConfig.Webhooks) != len(mutatingWebhookConfig.Webhooks) ||
+        !(foundWebhookConfig.Webhooks[0].Name == mutatingWebhookConfig.Webhooks[0].Name &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].AdmissionReviewVersions, mutatingWebhookConfig.Webhooks[0].AdmissionReviewVersions) &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].SideEffects, mutatingWebhookConfig.Webhooks[0].SideEffects) &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].FailurePolicy, mutatingWebhookConfig.Webhooks[0].FailurePolicy) &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].Rules, mutatingWebhookConfig.Webhooks[0].Rules) &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].NamespaceSelector, mutatingWebhookConfig.Webhooks[0].NamespaceSelector) &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].ClientConfig.CABundle, mutatingWebhookConfig.Webhooks[0].ClientConfig.CABundle) &&
+            reflect.DeepEqual(foundWebhookConfig.Webhooks[0].ClientConfig.Service, mutatingWebhookConfig.Webhooks[0].ClientConfig.Service)) {
+        // an existing configuration differs from the desired one — replace it,
+        // carrying over the ResourceVersion as required for an update
+        mutatingWebhookConfig.ObjectMeta.ResourceVersion = foundWebhookConfig.ObjectMeta.ResourceVersion
+        if _, err := mutatingWebhookConfigV1Client.MutatingWebhookConfigurations().Update(context.TODO(), mutatingWebhookConfig, metav1.UpdateOptions{}); err != nil {
+            warningLogger.Printf("Failed to update the mutatingwebhookconfiguration: %s", webhookConfigName)
+            return err
+        }
+        infoLogger.Printf("Updated the mutatingwebhookconfiguration: %s", webhookConfigName)
+    } else {
+        // BUG FIX: this log previously ran even right after an update;
+        // it now only runs when the existing configuration matched
+        infoLogger.Printf("The mutatingwebhookconfiguration: %s already exists and has no change", webhookConfigName)
+    }
+
+    return nil
+}
diff --git a/solace-pod-modifier-admission-plugin/deploy/clusterrole.yaml b/solace-pod-modifier-admission-plugin/deploy/clusterrole.yaml
new file mode 100644
index 00000000..b696aa11
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/clusterrole.yaml
@@ -0,0 +1,10 @@
+# ClusterRole granting full management of mutatingwebhookconfigurations —
+# required because the webhook self-registers its configuration at startup
+# (see createOrUpdateMutatingWebhookConfiguration).
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: solace-mutating-webhook-configurer
+ labels:
+ app: solace-pod-modifier-webhook
+rules:
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations"]
+ verbs: ["create", "get", "delete", "list", "patch", "update", "watch"]
diff --git a/solace-pod-modifier-admission-plugin/deploy/clusterrolebinding.yaml b/solace-pod-modifier-admission-plugin/deploy/clusterrolebinding.yaml
new file mode 100644
index 00000000..0b154fc0
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/clusterrolebinding.yaml
@@ -0,0 +1,13 @@
+# Binds the webhook-configurer ClusterRole to the webhook's ServiceAccount.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: solace-mutating-webhook-configurer-to-sa
+ labels:
+ app: solace-pod-modifier-webhook
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: solace-mutating-webhook-configurer
+subjects:
+- kind: ServiceAccount
+ name: solace-mutating-webhook-configurer-sa
+ # NOTE(review): no `namespace` is set on this ServiceAccount subject;
+ # presumably the kustomize namespace transformer fills it in — confirm
diff --git a/solace-pod-modifier-admission-plugin/deploy/deployment.yaml b/solace-pod-modifier-admission-plugin/deploy/deployment.yaml
new file mode 100644
index 00000000..bf6331ed
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/deployment.yaml
@@ -0,0 +1,35 @@
+# Deployment running the pod-modifier admission webhook server.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pod-modifier-webhook-deployment
+ labels:
+ app: solace-pod-modifier-webhook
+spec:
+ # single replica deployed here; increase `replicas` for redundancy if needed
+ replicas: 1
+ selector:
+ matchLabels:
+ app: solace-pod-modifier-webhook
+ template:
+ metadata:
+ labels:
+ app: solace-pod-modifier-webhook
+ spec:
+ #imagePullSecrets:
+ # - name:
+ serviceAccountName: solace-mutating-webhook-configurer-sa
+ containers:
+ - name: pod-modifier
+ # placeholder image name, rewritten by the `images` entry in kustomization.yaml
+ image: pod-modifier
+ imagePullPolicy: Always
+ args:
+ - -service-name=pod-modifier-webhook
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/sh", "-c", "/prestop.sh"]
diff --git a/solace-pod-modifier-admission-plugin/deploy/kustomization.yaml b/solace-pod-modifier-admission-plugin/deploy/kustomization.yaml
new file mode 100644
index 00000000..404eb1c1
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/kustomization.yaml
@@ -0,0 +1,13 @@
+# Kustomization assembling all webhook resources into one namespace.
+# NOTE(review): the namespace here is "solace-pod-modifier" while
+# namespace.yaml names the Namespace "pod-modifier"; presumably the
+# namespace transformer also renames the Namespace object — confirm
+# against the kustomize version in use.
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: solace-pod-modifier
+resources:
+- namespace.yaml
+- clusterrole.yaml
+- clusterrolebinding.yaml
+- deployment.yaml
+- service.yaml
+- serviceaccount.yaml
+# rewrite the placeholder image name used by deployment.yaml
+images:
+- name: pod-modifier
+ newName: pod-modifier
diff --git a/solace-pod-modifier-admission-plugin/deploy/namespace.yaml b/solace-pod-modifier-admission-plugin/deploy/namespace.yaml
new file mode 100644
index 00000000..0277c497
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/namespace.yaml
@@ -0,0 +1,5 @@
+# Namespace for the pod-modifier webhook deployment.
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: pod-modifier
+
diff --git a/solace-pod-modifier-admission-plugin/deploy/service.yaml b/solace-pod-modifier-admission-plugin/deploy/service.yaml
new file mode 100644
index 00000000..bd8f7a7c
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/service.yaml
@@ -0,0 +1,12 @@
+# Service fronting the webhook pods: the API server calls port 443, which is
+# forwarded to the container's TLS listener on 8443.
+apiVersion: v1
+kind: Service
+metadata:
+ name: pod-modifier-webhook
+ labels:
+ app: solace-pod-modifier-webhook
+spec:
+ ports:
+ - port: 443
+ targetPort: 8443
+ selector:
+ app: solace-pod-modifier-webhook
diff --git a/solace-pod-modifier-admission-plugin/deploy/serviceaccount.yaml b/solace-pod-modifier-admission-plugin/deploy/serviceaccount.yaml
new file mode 100644
index 00000000..55cbe943
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/deploy/serviceaccount.yaml
@@ -0,0 +1,6 @@
+# ServiceAccount the webhook Deployment runs as; bound to the
+# mutatingwebhookconfigurations ClusterRole via clusterrolebinding.yaml.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: solace-mutating-webhook-configurer-sa
+ labels:
+ app: solace-pod-modifier-webhook
diff --git a/solace-pod-modifier-admission-plugin/go.mod b/solace-pod-modifier-admission-plugin/go.mod
new file mode 100644
index 00000000..d17af171
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/go.mod
@@ -0,0 +1,38 @@
+module github.com/solacedev/kube-pod-modifier
+
+go 1.17
+
+require (
+ gopkg.in/yaml.v2 v2.4.0
+ k8s.io/api v0.19.15
+ k8s.io/apimachinery v0.19.15
+ k8s.io/client-go v0.19.15
+)
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/go-logr/logr v0.2.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/protobuf v1.4.2 // indirect
+ github.com/google/gofuzz v1.1.0 // indirect
+ github.com/googleapis/gnostic v0.4.1 // indirect
+ github.com/imdario/mergo v0.3.5 // indirect
+ github.com/json-iterator/go v1.1.10 // indirect
+ github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
+ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect
+ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect
+ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd // indirect
+ golang.org/x/text v0.3.3 // indirect
+ golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
+ google.golang.org/appengine v1.6.5 // indirect
+ google.golang.org/protobuf v1.24.0 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ k8s.io/klog/v2 v2.2.0 // indirect
+ k8s.io/utils v0.0.0-20200729134348-d5654de09c73 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+ sigs.k8s.io/yaml v1.2.0 // indirect
+)
diff --git a/solace-pod-modifier-admission-plugin/go.sum b/solace-pod-modifier-admission-plugin/go.sum
new file mode 100644
index 00000000..f0dda325
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/go.sum
@@ -0,0 +1,335 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24 h1:uYuGXJBAi1umT+ZS4oQJUgKtfXCAYTR+n9zw1ViT0vA=
+github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+k8s.io/api v0.19.15 h1:i22aQYrQ9gaBHEAS9XvyR5ZfrTDAd+Q+JwWM+xIBv30=
+k8s.io/api v0.19.15/go.mod h1:rMRWjnIJQmurd/FdLobht6dCSbJQ+UDpyOwPaoFS7lI=
+k8s.io/apimachinery v0.19.15 h1:P37ni6/yFxRMrqgM75k/vt5xq9vnNiR3rJPTmWXrNho=
+k8s.io/apimachinery v0.19.15/go.mod h1:RMyblyny2ZcDQ/oVE+lC31u7XTHUaSXEK2IhgtwGxfc=
+k8s.io/client-go v0.19.15 h1:lDBvFBjDIExh0mFS6JbG+5B7ghuPhqXjBzlaxG81ToU=
+k8s.io/client-go v0.19.15/go.mod h1:OJMQWgHQJRDtO2BVtpkHUQOq/e5WHpXc02lSdPI0S/k=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
+k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/solace-pod-modifier-admission-plugin/prestop.sh b/solace-pod-modifier-admission-plugin/prestop.sh
new file mode 100644
index 00000000..d555c6b8
--- /dev/null
+++ b/solace-pod-modifier-admission-plugin/prestop.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+set -exo pipefail
+
+# Point to the internal API server hostname
+APISERVER=https://kubernetes.default.svc
+
+# Path to ServiceAccount token
+SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount
+
+# Read this Pod's namespace
+NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace)
+
+# Read the ServiceAccount bearer token
+TOKEN=$(cat ${SERVICEACCOUNT}/token)
+
+# Reference the internal certificate authority (CA)
+CACERT=${SERVICEACCOUNT}/ca.crt
+
+MutatingWebhookConfigurationName=pod-modifier-webhook
+
+# Delete the mutatingwebhookconfiguration with TOKEN
+curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X DELETE ${APISERVER}/apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations/${MutatingWebhookConfigurationName}
+