diff --git a/DRAFT_RELEASE_NOTES.md b/DRAFT_RELEASE_NOTES.md index 22c796a31..c371da5fb 100644 --- a/DRAFT_RELEASE_NOTES.md +++ b/DRAFT_RELEASE_NOTES.md @@ -1,7 +1,7 @@ # Major Additions ## Path to Production Alignment -To better align development processes with processes in CI/CD and higher environments, we no longer recommend using Tilt for building and deploying projects. As such, upgrading projects should consider removing or at least narrowing the scope of their Tiltfile. See _**How to Upgrade**_ for more information. +To better align development processes with processes in CI/CD and higher environments, we no longer recommend using Tilt live-reloading. As such, upgrading projects should consider narrowing the scope of their Tiltfile. See _**How to Upgrade**_ for more information. ## Data Access Upgrade Data access through [GraphQL](https://graphql.org/) has been deprecated and replaced with [Trino](https://trino.io/). Trino is optimized for performing queries against large datasets by leveraging a distributed architecture that processes queries in parallel, enabling fast and scalable data retrieval. @@ -42,7 +42,6 @@ To reduce burden of upgrading aiSSEMBLE, the Baton project is used to automate t | upgrade-tiltfile-aissemble-version-migration | Updates the aiSSEMBLE version within your project's Tiltfile | | upgrade-v2-chart-files-aissemble-version-migration | Updates the Helm chart dependencies within your project's deployment resources (`-deploy/src/main/resources/apps/`) to use the latest version of the aiSSEMBLE | | upgrade-v1-chart-files-aissemble-version-migration | Updates the docker image tags within your project's deployment resources (`-deploy/src/main/resources/apps/`) to use the latest version of the aiSSEMBLE | -| spark-infrastructure-universal-config-yaml-migration | Removes the default hive username (if present) from hive-metastore-service values.yaml so that it can be set by the configuration store service | | pipeline-invocation-service-template-migrtion | Include the helm.valueFiles param to ArgoCD pipeline-invocation-service template | | docker-module-pom-dependency-type-migration | Updates the maven pipeline dependency type within your project's sub docker module pom file(`-docker/*-docker/pom.xml`) to fix the build cache checksum calculation issue | | enable-maven-docker-build-migration | Remove the maven fabric8 plugin `skip` configuration within your project's docker module pom file(`-docker/pom.xml`) to enable the maven docker build | @@ -72,10 +71,6 @@ To deactivate any of these migrations, add the following configuration to the `b ## Precondition Steps - Required for All Projects -### Maven Docker Build -To avoid duplicate docker builds, remove all the related `docker_build()` and `local_resources()` functions from your Tiltfile. Also, the `spark-worker-image.yaml` is no longer used -so `-deploy/src/main/resources/apps/spark-worker-image` directory ,and the related `k8s_yaml()` function from your Tiltfile can be removed. - ### Beginning the Upgrade To start your aiSSEMBLE upgrade, update your project's pom.xml to use the 1.11.0 version of the build-parent: ```xml @@ -86,6 +81,9 @@ To start your aiSSEMBLE upgrade, update your project's pom.xml to use the 1.11.0 ``` +### Tilt Docker Builds +To avoid duplicate docker builds, remove all the related `docker_build()` and `local_resources()` functions from your Tiltfile. 
Also, the `spark-worker-image.yaml` is no longer used so the `-deploy/src/main/resources/apps/spark-worker-image` directory and the related `k8s_yaml()` function from your Tiltfile can be removed. + ## Conditional Steps ## Final Steps - Required for All Projects diff --git a/extensions/extensions-helm/README.md b/extensions/extensions-helm/README.md index 3f896ac39..953b1ff32 100644 --- a/extensions/extensions-helm/README.md +++ b/extensions/extensions-helm/README.md @@ -27,52 +27,19 @@ Follow the instructions in the [_Kubernetes Artifacts Upgrade_](https://boozalle section of the _Path to Production > Container Support_ documentation page to update older projects to the new Extensions Helm baseline approach. ## Developing with Extension Helm - -When testing modifications to a Helm chart in a downstream project, special steps have to be taken as Helm charts are not published to the remote Helm Repository until a build -via GitHub Actions is completed. Firstly, all modifications to the aiSSEMBLE chart need to be committed and pushed to GitHub. Then, the chart downstream dependency needs to be -updated to point to the modified chart on your branch in GitHub. Unfortunately, this is [still not natively supported in -Helm](https://github.com/boozallen/aissemble/issues/488#issuecomment-2518466847), so we need to do some setup work to enable a plugin that provides this functionality. - -### Add the plugin to ArgoCD - -The repo server is responsible for running Helm commands to push changes into the cluster. This is -[documented](https://argo-cd.readthedocs.io/en/stable/user-guide/helm/#using-initcontainers) in ArgoCD, however these instructions didn't work, at least with our current chart -version of 7.4.1. (Note there were some discussions on the `argocd-helm` GitHub about a specific update causing a breaking change in this functionality, and the latest docs were -the "fix" for the breaking change. So could be that the old instructions would have worked fine.) To do this, we'll add an init container to the repo server that installs the -plugin to a shared volume mount. - -```yaml -aissemble-infrastructure-chart: - argo-cd: - repoServer: - env: - - name: HELM_CACHE_HOME - value: /helm-working-dir/.cache #Work around for install issue where plugins and cache locations being the same conflicts - initContainers: - - name: helm-plugin-install - image: alpine/helm - env: - - name: HELM_PLUGINS - value: /helm-working-dir/plugins #Configure Helm to write to the volume mount that the repo server uses - volumeMounts: - - mountPath: /helm-working-dir - name: helm-working-dir - command: [ "/bin/sh", "-c" ] - args: # install plugin - - apk --no-cache add curl; - helm plugin install https://github.com/aslafy-z/helm-git --version 1.3.0; - chmod -R 777 $HELM_PLUGINS; -``` - -### Updating the chart dependency - -To use your modified chart in the downstream project, the following changes should be made to the `Chart.yaml` file that pulls in the modified chart as a dependency: - - * Point `repository` to the modified chart on your branch in GitHub - * e.g.: `git+https://github.com/boozallen/aissemble/@extensions/extensions-helm/?ref=` - * _**NB:** if the chart being tested is in a nested project under extensions-helm, update the repo path accordingly_ - * Set `version` to `1.0.0` - -### Potential pitfalls - - * There is an issue with committing Chart.lock files when using an explicit repository vs a repository alias, so Chart.lock files must not be committed. 
+* When completing and locally testing a migration of a module to Extensions Helm, the above steps can be taken.
+  * As a precursor to the above steps, it is helpful to create a simple aiSSEMBLE project to use as a test-bed.
+* It is important to note that because the module's Helm charts will not be published to the remote Helm repository
+  until a build via GitHub Actions is completed, the `repository` field in the module's `Chart.yaml` file must be updated
+  to point to the local aiSSEMBLE baseline code. More specifically, in the test-bed project, the `repository` field in
+  `-deploy/src/main/resources/apps//Chart.yaml` should hold a value of the following form:
+`"file://../../../../../../../aissemble/extensions/extensions-helm/aissemble--chart"`
+  * The file path given is relative to the location of the `Chart.yaml` file, so seven or more `../` prefixes will be
+    required to reach wherever the local aiSSEMBLE baseline is stored on your local machine.
+  * In this example, seven `../` prefixes are added to the relative path because the test-bed project sits in the same
+    directory as the local `aissemble` baseline code.
+* Additionally, for local development only, the application's `Chart.yaml` in its corresponding `aissemble--chart`
+  should set the `version` and `appVersion` fields to the current aiSSEMBLE version; this allows the local
+  deployment to be tested when leveraging Tilt.
+  * If making use of additional aiSSEMBLE charts within your application's dependencies, the dependent subcharts should
+    have their `version` and `appVersion` updated to the current aiSSEMBLE version as well
\ No newline at end of file
diff --git a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/README.md b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/README.md
index a75bbb236..12afd44a8 100644
--- a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/README.md
+++ b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/README.md
@@ -34,7 +34,6 @@ helm install hive-metastore-service oci://ghcr.io/boozallen/aissemble-hive-metas
 | deployment.volumes | The deployment volumes | No |   - name: metastore-service-config
  configMap:
   name: metastore-service-config
   items:
     - key: metastore-site.xml
     path: metastore-site.xml | | service.spec.ports | The service spec ports | No |   - name: "thrift"
  port: 9083
  targetPort: 9083 | | mysql.enabled | Whether to use mysql as the backing database | No | true | -| configMap.configStore | Whether to use Configuration Store to inject property values | No | enabled | | configMap.metastoreServiceConfig.baseProperties | Default configuration for the metastore service | No | See [values.yaml](./values.yaml) | | configMap.metastoreServiceConfig.properties | Optional configuration for the metastore service. Properties added here will be included in the configuration without overriding the default properties | No | | @@ -49,7 +48,6 @@ configuration options. |------------------|---------------------| | fullnameOverride | "hive-metastore-db" | | auth.database | "metastore" | -| auth.username | "hive" | # Migration from aiSSEMBLE v1 Helm Charts @@ -78,7 +76,7 @@ In the table below, the notation `env[KEY]` refers the `env` list item whose `na | configMap.metastoreServiceConfig.configuration.property[metastore.expression.proxy] | configMap.metastoreServiceConfig.baseProperties[metastore.expression.proxy] | | | configMap.metastoreServiceConfig.configuration.property[javax.jdo.option.ConnectionDriverName] | configMap.metastoreServiceConfig.baseProperties[javax.jdo.option.ConnectionDriverName] | | | configMap.metastoreServiceConfig.configuration.property[javax.jdo.option.ConnectionURL] | configMap.metastoreServiceConfig.baseProperties[javax.jdo.option.ConnectionURL] | | -| configMap.metastoreServiceConfig.configuration.property[javax.jdo.option.ConnectionUserName] | configMap.metastoreServiceConfig.baseProperties[javax.jdo.option.ConnectionUserName] | Using Configuration store Service to inject this value | + ## Property Removed The following properties no longer exist. diff --git a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/templates/configmap.yaml b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/templates/configmap.yaml index ac30db9c1..ced1c23ab 100644 --- a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/templates/configmap.yaml +++ b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/templates/configmap.yaml @@ -2,8 +2,6 @@ apiVersion: v1 kind: ConfigMap metadata: name: metastore-service-config - labels: - aissemble-configuration-store: {{ .Values.configMap.configStore}} data: # Add all the default properties from the local values.yaml to the ConfigMap # Then check if there are any downstream properties and add them as well diff --git a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/tests/configmap_test.yaml b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/tests/configmap_test.yaml index af70dc3e8..bd589096b 100644 --- a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/tests/configmap_test.yaml +++ b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/tests/configmap_test.yaml @@ -33,11 +33,6 @@ tests: jdbc:mysql://hive-metastore-db:3306/metastore?createDatabaseIfNotExist=true&allowPublicKeyRetrieval=true&useSSL=false JDBC connect string for a JDBC metastore - - javax.jdo.option.ConnectionUserName - $getConfigValue(groupName=spark-infrastructure;propertyName=metastore.db.username) - Username to use against metastore database - - 
it: Should override default properties appropriately set: @@ -95,11 +90,6 @@ tests: jdbc:mysql://hive-metastore-db:3306/metastore?createDatabaseIfNotExist=true&allowPublicKeyRetrieval=true&useSSL=false JDBC connect string for a JDBC metastore - - javax.jdo.option.ConnectionUserName - $getConfigValue(groupName=spark-infrastructure;propertyName=metastore.db.username) - Username to use against metastore database - propertyName1 value1 @@ -139,11 +129,6 @@ tests: jdbc:mysql://hive-metastore-db:3306/metastore?createDatabaseIfNotExist=true&allowPublicKeyRetrieval=true&useSSL=false JDBC connect string for a JDBC metastore - - javax.jdo.option.ConnectionUserName - $getConfigValue(groupName=spark-infrastructure;propertyName=metastore.db.username) - Username to use against metastore database - metastore.thrift.uris thrift://0.0.0.0:8081 diff --git a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/values.yaml b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/values.yaml index 0915ede9e..8a207519d 100644 --- a/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/values.yaml +++ b/extensions/extensions-helm/extensions-helm-spark-infrastructure/aissemble-hive-metastore-service-chart/values.yaml @@ -15,16 +15,11 @@ image: dockerRepo: "ghcr.io/" mysql: - commonLabels: - aissemble-configuration-store: enabled enabled: true fullnameOverride: "hive-metastore-db" auth: # The schematool for the metastore service will create the database for us. database: "metastore" - # Note: Changing these values requires removal of the `hive-metastore-db-0` PVC, or manual modification of the - # persisted database. - username: $getConfigValue(groupName=spark-infrastructure;propertyName=metastore.db.username) hive: dbType: "mysql" @@ -63,7 +58,6 @@ service: # hive-metastore-service Config Map configMap: - configStore: enabled metastoreServiceConfig: baseProperties: - name: metastore.thrift.uris @@ -78,7 +72,4 @@ configMap: - name: javax.jdo.option.ConnectionURL value: jdbc:mysql://hive-metastore-db:3306/metastore?createDatabaseIfNotExist=true&allowPublicKeyRetrieval=true&useSSL=false description: JDBC connect string for a JDBC metastore - - name: javax.jdo.option.ConnectionUserName - value: $getConfigValue(groupName=spark-infrastructure;propertyName=metastore.db.username) - description: Username to use against metastore database properties: {} \ No newline at end of file diff --git a/foundation/foundation-archetype/src/main/resources/META-INF/archetype-post-generate.groovy b/foundation/foundation-archetype/src/main/resources/META-INF/archetype-post-generate.groovy index feff5490f..f4d283536 100644 --- a/foundation/foundation-archetype/src/main/resources/META-INF/archetype-post-generate.groovy +++ b/foundation/foundation-archetype/src/main/resources/META-INF/archetype-post-generate.groovy @@ -13,10 +13,6 @@ import javax.lang.model.SourceVersion final Logger logger = LoggerFactory.getLogger("com.boozallen.aissemble.foundation.archetype-post-generate") -def dir = new File(new File(request.outputDirectory), request.artifactId) -file = new File(dir,"deploy.sh") -file.setExecutable(true, false) - def groupIdRegex = ~'^[a-z][a-z0-9]*(?:\\.[a-z][a-z0-9]*)*$' // lowercase letters, numbers, and periods def artifactIdRegex = ~'^[a-z][a-z0-9]*(?:-?[\\da-z]+)*$' // lowercase letters, numbers, and hyphens def versionRegex = 
~'^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$' // Semantic Versioning diff --git a/foundation/foundation-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml b/foundation/foundation-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml index 3b395f64d..024da6735 100644 --- a/foundation/foundation-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml +++ b/foundation/foundation-archetype/src/main/resources/META-INF/maven/archetype-metadata.xml @@ -61,7 +61,6 @@ Tiltfile - deploy.sh .tiltignore devops/** jenkinsPipelineSteps.groovy @@ -107,20 +106,6 @@ - - - - - - */** - - - pom.xml - - - - diff --git a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/.helmignore b/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/.helmignore deleted file mode 100644 index 920cc413b..000000000 --- a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/.helmignore +++ /dev/null @@ -1,2 +0,0 @@ -pom.xml -target \ No newline at end of file diff --git a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/Chart.yaml b/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/Chart.yaml deleted file mode 100644 index 765b5247b..000000000 --- a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/Chart.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v2 -name: "${parentArtifactId}" -version: "${version}" -appVersion: "${version}" - -dependencies: - - name: aissemble-infrastructure-chart - version: ${archetypeVersion} - repository: oci://ghcr.io/boozallen diff --git a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/pom.xml b/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/pom.xml deleted file mode 100644 index 5731396ce..000000000 --- a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/pom.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - 4.0.0 - - - ${groupId} - ${parentArtifactId} - ${version} - - - ${parentArtifactId}-infrastructure - ${projectName}::Infrastructure - Contains the infrastructure artifacts for ${projectName} - helm - - - - - ${group.helm.plugin} - helm-maven-plugin - true - - - - - diff --git a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/values-dev.yaml b/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/values-dev.yaml deleted file mode 100644 index de5fd68f7..000000000 --- a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/values-dev.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Deploys ArgoCD with anonymous admin access enabled. 
-# ArgoCD will be available at http://localhost:30080/ -aissemble-infrastructure-chart: - jenkins: - enabled: false - ingress-nginx: - enabled: false - argo-cd: - crds: - keep: false - configs: - cm: - admin.enabled: false - users.anonymous.enabled: true - rbac: - policy.default: "role:admin" - server: - ingress: - enabled: false - service: - type: "NodePort" diff --git a/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/values.yaml b/foundation/foundation-archetype/src/main/resources/archetype-resources/__rootArtifactId__-infrastructure/values.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/foundation/foundation-archetype/src/main/resources/archetype-resources/deploy.sh b/foundation/foundation-archetype/src/main/resources/archetype-resources/deploy.sh deleted file mode 100755 index ce9dd51fe..000000000 --- a/foundation/foundation-archetype/src/main/resources/archetype-resources/deploy.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/sh - -APP_NAME=${artifactId} -INFRA_NAME=infrastructure - -print_usage() { - echo "Usage: $0 [up|down|shutdown]" - echo " startup create/upgrade deployment infrastructure" - echo " shutdown tear down deployment infrastructure (tears down application if needed)" - echo " up deploy application (starts deployment infrastructure if needed)" - echo " down tear down application" -} - -startup() { - echo "Deploying infrastructure..." - helm upgrade --install $INFRA_NAME ${artifactId}-infrastructure \ - --values ${artifactId}-infrastructure/values.yaml \ - --values ${artifactId}-infrastructure/values-dev.yaml - if ! kubectl rollout status --namespace argocd deployment/argocd-server --timeout=30s; then - exit $? - fi - argocd repo add ${projectGitUrl} --server localhost:30080 --plaintext --insecure-skip-server-verification -} - -is_app_running() { - argocd app get $APP_NAME --server localhost:30080 --plaintext > /dev/null 2>&1 -} - -deploy() { - echo "Checking for deployment infrastructure..." - helm status $INFRA_NAME > /dev/null 2>&1 - if [ $? -ne 0 ]; then - startup - fi - - if is_app_running; then - echo "${artifactId} is deployed" - else - branch=$(git rev-parse --abbrev-ref HEAD) - echo "Deploying ${artifactId} from branch '$branch'..." - argocd app create $APP_NAME \ - --server localhost:30080 --plaintext \ - --dest-namespace ${artifactId} \ - --dest-server https://kubernetes.default.svc \ - --repo ${projectGitUrl} \ - --path ${artifactId}-deploy/src/main/resources \ - --revision $branch \ - --helm-set spec.targetRevision=$branch \ - --values values.yaml \ - --values values-dev.yaml \ - --sync-policy automated - fi -} - -down() { - if is_app_running; then - echo "Tearing down app..." - argocd app delete $APP_NAME --server localhost:30080 --plaintext --yes - else - echo "${artifactId} is not deployed" - fi -} - -shutdown() { - helm status $INFRA_NAME > /dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "Infrastructure already shutdown" - else - if is_app_running; then - down - fi - echo "Shutting down infrastructure..." 
- helm uninstall $INFRA_NAME - fi -} - - -if [ "$1" = "up" ]; then - deploy -elif [ "$1" = "down" ]; then - down -elif [ "$1" = "shutdown" ]; then - shutdown -elif [ "$1" = "startup" ]; then - startup -else - print_usage -fi diff --git a/foundation/foundation-mda/src/main/java/com/boozallen/aiops/mda/generator/ModelAgnosticResourcesGenerator.java b/foundation/foundation-mda/src/main/java/com/boozallen/aiops/mda/generator/ModelAgnosticResourcesGenerator.java index ec56e4473..e40e2e356 100644 --- a/foundation/foundation-mda/src/main/java/com/boozallen/aiops/mda/generator/ModelAgnosticResourcesGenerator.java +++ b/foundation/foundation-mda/src/main/java/com/boozallen/aiops/mda/generator/ModelAgnosticResourcesGenerator.java @@ -22,29 +22,27 @@ public class ModelAgnosticResourcesGenerator extends AbstractModelAgnosticGenerator { /*--~-~-~~ * Usages: - * | Target | Template | Generated File | - * |------------------------------------|-----------------------------------------------------------------------------------------|------------------------------------------------------| - * | cdiBeansXml | beans.xml.vm | META-INF/beans.xml | - * | cucumberPipelineFeature | cucumber.pipeline.feature.vm | specifications/pipeline.feature | - * | cucumberProperties | cucumber.properties.vm | cucumber.properties | - * | dataAccessApplicationProperties | data-access/data.access.application.properties.vm | application.properties | - * | baseSparkInfrastructureProperties | deployment/spark-infrastructure/configurations/base/spark-infrastructure.properties.vm | configurations/base/spark-infrastructure.properties | - * | envSparkInfrastructureProperties | deployment/spark-infrastructure/configurations/env/spark-infrastructure.properties.vm | configurations/env/spark-infrastructure.properties | - * | mlflowStartScript | general-docker/mlflow.start.sh.vm | start.sh | - * | sparkDataDeliveryProperties | general-docker/spark.data.delivery.properties.vm | krausening/base/spark-data-delivery.properties | - * | authConfigResource | general-mlflow/auth.properties.vm | krausening/base/auth.properties | - * | inferenceConfigResource | general-mlflow/inference.config.properties.vm | krausening/base/inference.properties | - * | trainingPipelineConfigResource | general-mlflow/training.config.properties.vm | krausening/base/pipeline.properties | - * | itChartYaml | integration-test/it.chart.yaml.vm | test-chart/Chart.yaml | - * | itPipelineSpecification | integration-test/it.pipeline.spec.vm | specifications/pipeline.feature | - * | itServiceAccountYaml | integration-test/it.serviceaccount.yaml.vm | test-chart/templates/serviceaccount.yaml | - * | itTestYaml | integration-test/it.test.yaml.vm | test-chart/templates/test.yaml | - * | itTiltfile | integration-test/it.tiltfile.vm | Tiltfile | - * | itValuesCIYaml | integration-test/it.values.ci.yaml.vm | test-chart/values-ci.yaml | - * | itValuesPipelineYaml | integration-test/it.values.pipeline.yaml.vm | test-chart/values-pipeline.yaml | - * | itValuesYaml | integration-test/it.values.yaml.vm | test-chart/values.yaml | - * | testLog4jConfiguration | log4j2.xml.vm | log4j2.xml | - * | globalDeploymentConfigFile | pipeline-models/deployment-config.json.vm | deployment-config.json | + * | Target | Template | Generated File | + * |----------------------------------|----------------------------------------------------|-------------------------------------------------| + * | cdiBeansXml | beans.xml.vm | META-INF/beans.xml | + * | cucumberPipelineFeature | cucumber.pipeline.feature.vm | 
specifications/pipeline.feature | + * | cucumberProperties | cucumber.properties.vm | cucumber.properties | + * | dataAccessApplicationProperties | data-access/data.access.application.properties.vm | application.properties | + * | mlflowStartScript | general-docker/mlflow.start.sh.vm | start.sh | + * | sparkDataDeliveryProperties | general-docker/spark.data.delivery.properties.vm | krausening/base/spark-data-delivery.properties | + * | authConfigResource | general-mlflow/auth.properties.vm | krausening/base/auth.properties | + * | inferenceConfigResource | general-mlflow/inference.config.properties.vm | krausening/base/inference.properties | + * | trainingPipelineConfigResource | general-mlflow/training.config.properties.vm | krausening/base/pipeline.properties | + * | itChartYaml | integration-test/it.chart.yaml.vm | test-chart/Chart.yaml | + * | itPipelineSpecification | integration-test/it.pipeline.spec.vm | specifications/pipeline.feature | + * | itServiceAccountYaml | integration-test/it.serviceaccount.yaml.vm | test-chart/templates/serviceaccount.yaml | + * | itTestYaml | integration-test/it.test.yaml.vm | test-chart/templates/test.yaml | + * | itTiltfile | integration-test/it.tiltfile.vm | Tiltfile | + * | itValuesCIYaml | integration-test/it.values.ci.yaml.vm | test-chart/values-ci.yaml | + * | itValuesPipelineYaml | integration-test/it.values.pipeline.yaml.vm | test-chart/values-pipeline.yaml | + * | itValuesYaml | integration-test/it.values.yaml.vm | test-chart/values.yaml | + * | testLog4jConfiguration | log4j2.xml.vm | log4j2.xml | + * | globalDeploymentConfigFile | pipeline-models/deployment-config.json.vm | deployment-config.json | */ diff --git a/foundation/foundation-mda/src/main/resources/profiles.json b/foundation/foundation-mda/src/main/resources/profiles.json index d1db06f0e..4f0b362ee 100644 --- a/foundation/foundation-mda/src/main/resources/profiles.json +++ b/foundation/foundation-mda/src/main/resources/profiles.json @@ -322,12 +322,6 @@ }, { "name": "valuesCIFile" - }, - { - "name": "baseSparkInfrastructureProperties" - }, - { - "name": "envSparkInfrastructureProperties" } ] }, diff --git a/foundation/foundation-mda/src/main/resources/targets.json b/foundation/foundation-mda/src/main/resources/targets.json index 82154275f..6b2ce63d3 100644 --- a/foundation/foundation-mda/src/main/resources/targets.json +++ b/foundation/foundation-mda/src/main/resources/targets.json @@ -2405,22 +2405,6 @@ "metadataContext": "targeted", "overwritable": false }, - { - "name": "baseSparkInfrastructureProperties", - "templateName": "templates/deployment/spark-infrastructure/configurations/base/spark-infrastructure.properties.vm", - "outputFile": "configurations/base/spark-infrastructure.properties", - "generator": "com.boozallen.aiops.mda.generator.ModelAgnosticResourcesGenerator", - "metadataContext": "targeted", - "overwritable": false - }, - { - "name": "envSparkInfrastructureProperties", - "templateName": "templates/deployment/spark-infrastructure/configurations/env/spark-infrastructure.properties.vm", - "outputFile": "configurations/env/spark-infrastructure.properties", - "generator": "com.boozallen.aiops.mda.generator.ModelAgnosticResourcesGenerator", - "metadataContext": "targeted", - "overwritable": false - }, { "name": "generatedPostActionInit", "templateName": "templates/python.init.py.vm", diff --git a/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/configurations/base/spark-infrastructure.properties.vm 
b/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/configurations/base/spark-infrastructure.properties.vm deleted file mode 100644 index 6e2a3f3e4..000000000 --- a/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/configurations/base/spark-infrastructure.properties.vm +++ /dev/null @@ -1,4 +0,0 @@ - -# Password and other sensitive information should be encrypted using krausening -# (See Here: https://github.com/TechnologyBrewery/krausening/tree/dev/krausening/#krausening-in-four-pints-leveraging-jasypt-for-encryptingdecrypting-properties) -metastore.db.username=hive \ No newline at end of file diff --git a/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/configurations/env/spark-infrastructure.properties.vm b/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/configurations/env/spark-infrastructure.properties.vm deleted file mode 100644 index 7670a5253..000000000 --- a/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/configurations/env/spark-infrastructure.properties.vm +++ /dev/null @@ -1,3 +0,0 @@ - -# Password and other sensitive information should be encrypted using krausening -# (See Here: https://github.com/TechnologyBrewery/krausening/tree/dev/krausening/#krausening-in-four-pints-leveraging-jasypt-for-encryptingdecrypting-properties) \ No newline at end of file diff --git a/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/v2/spark.infrastructure.values.yaml.vm b/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/v2/spark.infrastructure.values.yaml.vm index ea55a88ba..9b4ad620b 100644 --- a/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/v2/spark.infrastructure.values.yaml.vm +++ b/foundation/foundation-mda/src/main/resources/templates/deployment/spark-infrastructure/v2/spark.infrastructure.values.yaml.vm @@ -50,6 +50,7 @@ aissemble-hive-metastore-service-chart: auth: # Note: Changing these values requires removal of the `hive-metastore-db-0` PVC, or manual modification of the # persisted database. + username: hive rootPassword: hive replicationPassword: hive password: hive @@ -67,11 +68,11 @@ aissemble-hive-metastore-service-chart: name: remote-auth-config key: AWS_ACCESS_KEY_ID configMap: - metadata: - labels: - configStore: enabled metastoreServiceConfig: properties: + - name: javax.jdo.option.ConnectionUserName + value: hive + description: Username to use against metastore database - name: javax.jdo.option.ConnectionPassword value: hive description: Password to use against metastore database diff --git a/foundation/foundation-mda/src/test/java/com/boozallen/aiops/mda/generator/PropertiesStep.java b/foundation/foundation-mda/src/test/java/com/boozallen/aiops/mda/generator/PropertiesStep.java deleted file mode 100644 index 4fa850e2d..000000000 --- a/foundation/foundation-mda/src/test/java/com/boozallen/aiops/mda/generator/PropertiesStep.java +++ /dev/null @@ -1,80 +0,0 @@ -package com.boozallen.aiops.mda.generator;/*- - * #%L - * aiSSEMBLE::Foundation::MDA - * %% - * Copyright (C) 2021 Booz Allen - * %% - * This software package is licensed under the Booz Allen Public License. All Rights Reserved. 
- * #L% - */ - -import com.boozallen.aiops.mda.metamodel.element.*; -import io.cucumber.java.After; -import io.cucumber.java.AfterStep; -import io.cucumber.java.Before; -import io.cucumber.java.Scenario; -import io.cucumber.java.en.Given; -import io.cucumber.java.en.Then; -import io.cucumber.java.en.When; -import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.technologybrewery.fermenter.mda.GenerateSourcesHelper; -import org.technologybrewery.fermenter.mda.element.ExpandedProfile; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertTrue; - -public class PropertiesStep extends AbstractModelInstanceSteps { - private static final Logger logger = LoggerFactory.getLogger(PropertiesStep.class); - - @Before("@properties-generation") - public void setup(Scenario scenario) throws IOException { - this.scenario = scenario.getName(); - } - - @AfterStep("@properties-generation") - public void cleanup(Scenario scenario) throws IOException { - FileUtils.deleteDirectory(GENERATED_METADATA_DIRECTORY); - } - - @Given("project called {string}") - public void project_called(String projectName) throws IOException { - createProject(projectName, "shared"); - } - - @Given("{string} pipeline is using {string}") - public void pipeline_using(String typeName, String implName) throws IOException { - createAndSavePipeline(unique("TestPipeline"), typeName, implName); - } - - @When("the profile for {string} is generated") - public void profile_spark_infra_is_generated(String profileName) throws Exception { - readMetadata(projectName); - Map profiles = loadProfiles(); - GenerateSourcesHelper.performSourceGeneration(profileName, profiles, this::createGenerationContext, (missingProfile, foundProfiles) -> { - throw new RuntimeException("Missing profile: " + missingProfile); - }, new Slf4jDelegate(logger), projectDir.toFile()); - } - - @Then("spark-infrastructure.properties file is generated in {string}") - public void properties_file_generated(String propertiesPath) { - Path properties = projectDir.resolve(propertiesPath); - assertTrue("File not created: " + properties, Files.exists(properties) && Files.isRegularFile(properties)); - } - - @Then("spark-infrastructure.properties file generated in {string}, {string} properties are set to {string}") - public void properties_set_values_correctly(String propertiesPath, String propertyName, String propertyValue) throws IOException { - Path properties = projectDir.resolve(propertiesPath); - List lines = Files.readAllLines(properties); - String expectedPropertiesMap = propertyName + "=" + propertyValue; - assertTrue("Expected properties not found in " + propertiesPath, lines.contains(expectedPropertiesMap)); - } - - -} \ No newline at end of file diff --git a/foundation/foundation-mda/src/test/resources/specifications/properties-generation.feature b/foundation/foundation-mda/src/test/resources/specifications/properties-generation.feature deleted file mode 100644 index 1f0c26aff..000000000 --- a/foundation/foundation-mda/src/test/resources/specifications/properties-generation.feature +++ /dev/null @@ -1,16 +0,0 @@ -@properties-generation -Feature: Generating properties file resources - - @module-generation - Scenario:spark-infrastructure properties generation - Given project called "example" - And "data-flow" pipeline is using "data-delivery-spark" - When the profile for 
"aissemble-spark-infrastructure-deploy-v2" is generated - Then spark-infrastructure.properties file is generated in "" - And spark-infrastructure.properties file generated in "main/resources/configurations/base/spark-infrastructure.properties", "metastore.db.username" properties are set to "hive" - - - Examples: - | sparkInfrastructurePropertiesPath | - | main/resources/configurations/base/spark-infrastructure.properties | - | main/resources/configurations/env/spark-infrastructure.properties | \ No newline at end of file diff --git a/foundation/foundation-upgrade/src/main/java/com/boozallen/aissemble/upgrade/migration/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration.java b/foundation/foundation-upgrade/src/main/java/com/boozallen/aissemble/upgrade/migration/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration.java deleted file mode 100644 index 664c92a2f..000000000 --- a/foundation/foundation-upgrade/src/main/java/com/boozallen/aissemble/upgrade/migration/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration.java +++ /dev/null @@ -1,113 +0,0 @@ -package com.boozallen.aissemble.upgrade.migration.v1_11_0; - -/*- - * #%L - * aiSSEMBLE::Foundation::Upgrade - * %% - * Copyright (C) 2021 Booz Allen - * %% - * This software package is licensed under the Booz Allen Public License. All Rights Reserved. - * #L% - */ - -import com.boozallen.aissemble.upgrade.migration.AbstractAissembleMigration; -import com.boozallen.aissemble.upgrade.util.YamlUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.error.YAMLException; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.file.Files; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; - -/** - * This migration removes hive username in the hive-metastore-service values.yaml to use values from configuration store service only if values.yaml has default value. 
- */ -public class SparkInfrastructureUniversalConfigYAMLMigration extends AbstractAissembleMigration { - private static final Logger logger = LoggerFactory.getLogger(SparkInfrastructureUniversalConfigYAMLMigration.class); - private static final List propertiesForConfigStore = Arrays.asList("javax.jdo.option.ConnectionUserName"); - private static final List mysqlAuthForConfigStore = Arrays.asList("username"); - private static final HashSet linesToRemove = new HashSet<>(Arrays.asList("username: hive", "- name: javax.jdo.option.ConnectionUserName" ,"value: hive", "description: Username to use against metastore database", "# NB: UCS")); - private static final String DEFAULT_METASTORE_DB_USERNAME = "hive"; - @Override - protected boolean shouldExecuteOnFile(File file) { - try { - YamlUtils.YamlObject yaml = YamlUtils.loadYaml(file); - if(yaml.hasObject("aissemble-hive-metastore-service-chart")) - { - yaml = yaml.getObject("aissemble-hive-metastore-service-chart"); - }else{ - return false; - } - boolean hasMySqlAuthDefaultUsername = false; - if (yaml.hasObject("mysql", "auth")) { - YamlUtils.YamlObject mysqlAuth = yaml.getObject("mysql", "auth"); - for(String key : mysqlAuth.keySet()) - { - if(mysqlAuthForConfigStore.contains(key) && mysqlAuth.get(key).equals(DEFAULT_METASTORE_DB_USERNAME)) - { - hasMySqlAuthDefaultUsername = true; - break; - } - } - - } - boolean hasMetastoreServiceConfigDefaultUsername = false; - if (yaml.hasList("configMap", "metastoreServiceConfig", "properties")) { - List properties = yaml.getListOfObjects("configMap", "metastoreServiceConfig", "properties"); - for(YamlUtils.YamlObject property : properties) - { - if(propertiesForConfigStore.contains(property.get("name")) && property.get("value").equals(DEFAULT_METASTORE_DB_USERNAME)) - { - hasMetastoreServiceConfigDefaultUsername = true; - break; - } - } - } - return hasMySqlAuthDefaultUsername && hasMetastoreServiceConfigDefaultUsername; - - } catch (YAMLException e) { - if (logger.isDebugEnabled()) { - logger.debug("Failed to parse values yaml file: {}", file.getPath(), e); - } else { - logger.info("Failed to parse values yaml file: {}", file.getName()); - } - } - catch (IOException e) { - throw new RuntimeException(e); - } - return false; - } - - @Override - protected boolean performMigration(File file) { - try { - File tempFile = new File("tempValuesFile.yaml"); - BufferedWriter writer = new BufferedWriter(new FileWriter(tempFile)); - List lines = Files.readAllLines(file.toPath()); - for (int i = 0; i < lines.size(); i++) { - String line = lines.get(i).trim(); - boolean skipConnectionPassword = false; - if(i >= 1 && lines.get(i-1).trim().equals("- name: javax.jdo.option.ConnectionPassword")) - { - skipConnectionPassword = true; - } - if(linesToRemove.contains(line) && !skipConnectionPassword) - { - continue; - } - writer.write(lines.get(i) + System.getProperty("line.separator")); - } - writer.close(); - - return tempFile.renameTo(file); - } catch (IOException e) { - throw new RuntimeException(e); - } - } -} diff --git a/foundation/foundation-upgrade/src/main/resources/migrations.json b/foundation/foundation-upgrade/src/main/resources/migrations.json index b1822cce8..d116db8de 100644 --- a/foundation/foundation-upgrade/src/main/resources/migrations.json +++ b/foundation/foundation-upgrade/src/main/resources/migrations.json @@ -82,17 +82,6 @@ ] } ] - }, - { - "name": "spark-infrastructure-universal-config-yaml-migration", - "implementation": 
"com.boozallen.aissemble.upgrade.migration.v1_11_0.SparkInfrastructureUniversalConfigYAMLMigration", - "fileSets": [ - { - "includes": [ - "*-deploy/src/main/resources/apps/spark-infrastructure/values.yaml" - ] - } - ] } ] } diff --git a/foundation/foundation-upgrade/src/test/java/com/boozallen/aissemble/upgrade/migration/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigrationTest.java b/foundation/foundation-upgrade/src/test/java/com/boozallen/aissemble/upgrade/migration/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigrationTest.java deleted file mode 100644 index bde4f887f..000000000 --- a/foundation/foundation-upgrade/src/test/java/com/boozallen/aissemble/upgrade/migration/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigrationTest.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.boozallen.aissemble.upgrade.migration.v1_11_0; - -/*- - * #%L - * aiSSEMBLE::Foundation::Upgrade - * %% - * Copyright (C) 2021 Booz Allen - * %% - * This software package is licensed under the Booz Allen Public License. All Rights Reserved. - * #L% - */ - -import com.boozallen.aissemble.upgrade.migration.AbstractMigrationTest; -import io.cucumber.java.en.Given; -import io.cucumber.java.en.Then; -import io.cucumber.java.en.When; - - -public class SparkInfrastructureUniversalConfigYAMLMigrationTest extends AbstractMigrationTest { - @Given("default values.yaml of spark infrastructure") - public void aSparkInfrastructureValuesFileWithDefaultValues() - { - testFile = getTestFile("v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/default-values.yaml"); - - } - @Given("values.yaml of spark infrastructure with hive username and password changed from \"hive\"") - public void aSparkInfrastructureValuesFileModifiedToCustomValues() - { - testFile = getTestFile("v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/custom-values.yaml"); - - } - - @When("the spark infrastructure configuration migration executes") - public void theSparkInfrastructureConfigurationMigrationExecutes() - { - SparkInfrastructureUniversalConfigYAMLMigration migration = new SparkInfrastructureUniversalConfigYAMLMigration(); - performMigration(migration); - } - - @Then("values.yaml is updated to remove hive username properties") - public void theHiveUsernameRemoved() { - assertMigrationSuccess(); - var updatedValue = getTestFile("v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/updated-values.yaml"); - assertLinesMatch("Yaml file not updated with removed hive username properties: " + testFile.getName(), testFile, updatedValue); - } - - @Then("spark infrastructure configuration migration is skipped") - public void theSparkInfrastructureConfigMigrationIsSkipped() { - assertMigrationSkipped(); - } -} diff --git a/foundation/foundation-upgrade/src/test/resources/specifications/v1_11_0/spark-infrastructure-uc-migration.feature b/foundation/foundation-upgrade/src/test/resources/specifications/v1_11_0/spark-infrastructure-uc-migration.feature deleted file mode 100644 index 04ce64ca3..000000000 --- a/foundation/foundation-upgrade/src/test/resources/specifications/v1_11_0/spark-infrastructure-uc-migration.feature +++ /dev/null @@ -1,16 +0,0 @@ -Feature: Migrate Spark Infrastructure Universal Configuration Migration - The Hive Metastore Service starts to implement Universal Configuration Store to inject hive credentials. We make configuration store call - by the default. 
If consumer keep default value of hive credentials, it will be migrated to use configuration store, if not it would override to use custom value without migration. - hive.username refers to javax.jdo.option.ConnectionUserName and aissemble-hive-metastore-service-chart.mysql.auth.username - -Scenario: Default Spark infrastructure file is migrated - Given default values.yaml of spark infrastructure - When the spark infrastructure configuration migration executes - Then values.yaml is updated to remove hive username properties - - - -Scenario: Spark infrastructure file with modified hive credentials is not migrated - Given values.yaml of spark infrastructure with hive username and password changed from "hive" - When the spark infrastructure configuration migration executes - Then spark infrastructure configuration migration is skipped \ No newline at end of file diff --git a/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/custom-values.yaml b/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/custom-values.yaml deleted file mode 100644 index c8f9883f0..000000000 --- a/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/custom-values.yaml +++ /dev/null @@ -1,89 +0,0 @@ -# This file contains the default values for the aiSSEMBLE Spark Infrastructure Helm chart. - -aissemble-spark-history-chart: - app: - name: "spark-history" - eventVolume: - enabled: true - -aissemble-thrift-server-chart: - app: - name: "thrift-server" - dependencies: - packages: - - org.apache.hadoop:hadoop-aws:3.3.4 - - deployment: - metadata: - labels: - aissemble-configuration-store: enabled - envFromSecret: - AWS_ACCESS_KEY_ID: - secretName: remote-auth-config - key: AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY: - secretName: remote-auth-config - key: AWS_SECRET_ACCESS_KEY - - sparkConf: | - spark.hadoop.fs.s3a.endpoint=http://s3-local:4566 - spark.hadoop.fs.s3a.access.key=$getConfigValue(groupName=aws-credentials;propertyName=AWS_SECRET_ACCESS_KEY) - spark.hadoop.fs.s3a.secret.key=$getConfigValue(groupName=aws-credentials;propertyName=AWS_SECRET_ACCESS_KEY) - spark.hadoop.fs.s3.impl=org.apache.hadoop.fs.s3a.S3AFileSystem - spark.hive.server2.thrift.port=10000 - spark.hive.server2.thrift.http.port=10001 - spark.hive.server2.transport.mode=http - spark.hive.metastore.warehouse.dir=s3a://spark-infrastructure/warehouse - spark.hadoop.fs.s3a.path.style.access=true - spark.hive.server2.thrift.http.path=cliservice - spark.hive.metastore.schema.verification=false - spark.hive.metastore.uris=thrift://hive-metastore-service:9083/default - - hiveSite: | - - - datanucleus.schema.autoCreateAll - true - Creates necessary schema on a startup if one does not exist - - - -aissemble-hive-metastore-service-chart: - mysql: - auth: - # Note: Changing these values requires removal of the `hive-metastore-db-0` PVC, or manual modification of the - # persisted database. 
- username: custom - # NB: UCS - rootPassword: hive - replicationPassword: hive - password: hive - - deployment: - env: - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: remote-auth-config - key: AWS_SECRET_ACCESS_KEY - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: remote-auth-config - key: AWS_ACCESS_KEY_ID - configMap: - metastoreServiceConfig: - properties: - - name: javax.jdo.option.ConnectionUserName - value: custom - description: Username to use against metastore database - # NB: UCS - - name: javax.jdo.option.ConnectionPassword - value: hive - description: Password to use against metastore database - - name: metastore.warehouse.dir - value: s3a://spark-infrastructure/warehouse - - name: fs.s3a.endpoint - value: http://s3-local:4566 - - name: fs.s3a.path.style.access - value: true diff --git a/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/default-values.yaml b/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/default-values.yaml deleted file mode 100644 index 20b1a1cbe..000000000 --- a/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/default-values.yaml +++ /dev/null @@ -1,89 +0,0 @@ -# This file contains the default values for the aiSSEMBLE Spark Infrastructure Helm chart. - -aissemble-spark-history-chart: - app: - name: "spark-history" - eventVolume: - enabled: true - -aissemble-thrift-server-chart: - app: - name: "thrift-server" - dependencies: - packages: - - org.apache.hadoop:hadoop-aws:3.3.4 - - deployment: - metadata: - labels: - aissemble-configuration-store: enabled - envFromSecret: - AWS_ACCESS_KEY_ID: - secretName: remote-auth-config - key: AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY: - secretName: remote-auth-config - key: AWS_SECRET_ACCESS_KEY - - sparkConf: | - spark.hadoop.fs.s3a.endpoint=http://s3-local:4566 - spark.hadoop.fs.s3a.access.key=$getConfigValue(groupName=aws-credentials;propertyName=AWS_SECRET_ACCESS_KEY) - spark.hadoop.fs.s3a.secret.key=$getConfigValue(groupName=aws-credentials;propertyName=AWS_SECRET_ACCESS_KEY) - spark.hadoop.fs.s3.impl=org.apache.hadoop.fs.s3a.S3AFileSystem - spark.hive.server2.thrift.port=10000 - spark.hive.server2.thrift.http.port=10001 - spark.hive.server2.transport.mode=http - spark.hive.metastore.warehouse.dir=s3a://spark-infrastructure/warehouse - spark.hadoop.fs.s3a.path.style.access=true - spark.hive.server2.thrift.http.path=cliservice - spark.hive.metastore.schema.verification=false - spark.hive.metastore.uris=thrift://hive-metastore-service:9083/default - - hiveSite: | - - - datanucleus.schema.autoCreateAll - true - Creates necessary schema on a startup if one does not exist - - - -aissemble-hive-metastore-service-chart: - mysql: - auth: - # Note: Changing these values requires removal of the `hive-metastore-db-0` PVC, or manual modification of the - # persisted database. 
- username: hive - # NB: UCS - rootPassword: hive - replicationPassword: hive - password: hive - - deployment: - env: - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: remote-auth-config - key: AWS_SECRET_ACCESS_KEY - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: remote-auth-config - key: AWS_ACCESS_KEY_ID - configMap: - metastoreServiceConfig: - properties: - - name: javax.jdo.option.ConnectionUserName - value: hive - description: Username to use against metastore database - # NB: UCS - - name: javax.jdo.option.ConnectionPassword - value: hive - description: Password to use against metastore database - - name: metastore.warehouse.dir - value: s3a://spark-infrastructure/warehouse - - name: fs.s3a.endpoint - value: http://s3-local:4566 - - name: fs.s3a.path.style.access - value: true diff --git a/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/updated-values.yaml b/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/updated-values.yaml deleted file mode 100644 index acb2cac02..000000000 --- a/foundation/foundation-upgrade/src/test/resources/test-files/v1_11_0/SparkInfrastructureUniversalConfigYAMLMigration/migration/updated-values.yaml +++ /dev/null @@ -1,83 +0,0 @@ -# This file contains the default values for the aiSSEMBLE Spark Infrastructure Helm chart. - -aissemble-spark-history-chart: - app: - name: "spark-history" - eventVolume: - enabled: true - -aissemble-thrift-server-chart: - app: - name: "thrift-server" - dependencies: - packages: - - org.apache.hadoop:hadoop-aws:3.3.4 - - deployment: - metadata: - labels: - aissemble-configuration-store: enabled - envFromSecret: - AWS_ACCESS_KEY_ID: - secretName: remote-auth-config - key: AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY: - secretName: remote-auth-config - key: AWS_SECRET_ACCESS_KEY - - sparkConf: | - spark.hadoop.fs.s3a.endpoint=http://s3-local:4566 - spark.hadoop.fs.s3a.access.key=$getConfigValue(groupName=aws-credentials;propertyName=AWS_SECRET_ACCESS_KEY) - spark.hadoop.fs.s3a.secret.key=$getConfigValue(groupName=aws-credentials;propertyName=AWS_SECRET_ACCESS_KEY) - spark.hadoop.fs.s3.impl=org.apache.hadoop.fs.s3a.S3AFileSystem - spark.hive.server2.thrift.port=10000 - spark.hive.server2.thrift.http.port=10001 - spark.hive.server2.transport.mode=http - spark.hive.metastore.warehouse.dir=s3a://spark-infrastructure/warehouse - spark.hadoop.fs.s3a.path.style.access=true - spark.hive.server2.thrift.http.path=cliservice - spark.hive.metastore.schema.verification=false - spark.hive.metastore.uris=thrift://hive-metastore-service:9083/default - - hiveSite: | - - - datanucleus.schema.autoCreateAll - true - Creates necessary schema on a startup if one does not exist - - - -aissemble-hive-metastore-service-chart: - mysql: - auth: - # Note: Changing these values requires removal of the `hive-metastore-db-0` PVC, or manual modification of the - # persisted database. 
- rootPassword: hive - replicationPassword: hive - password: hive - - deployment: - env: - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: remote-auth-config - key: AWS_SECRET_ACCESS_KEY - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: remote-auth-config - key: AWS_ACCESS_KEY_ID - configMap: - metastoreServiceConfig: - properties: - - name: javax.jdo.option.ConnectionPassword - value: hive - description: Password to use against metastore database - - name: metastore.warehouse.dir - value: s3a://spark-infrastructure/warehouse - - name: fs.s3a.endpoint - value: http://s3-local:4566 - - name: fs.s3a.path.style.access - value: true
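
---

For reference, the local chart override described in the updated `extensions/extensions-helm/README.md` would look roughly like the sketch below. The project name (`example-project`), the app/chart name (`example`), the number of `../` hops, and the version values are illustrative assumptions, not part of this change:

```yaml
# Sketch only: a test-bed project's Chart.yaml pointing at a locally checked-out
# aiSSEMBLE chart, per the updated extensions-helm README. "example" and the
# seven ../ hops are hypothetical; adjust to your module name and directory layout.
# Assumed location: example-project-deploy/src/main/resources/apps/example/Chart.yaml
apiVersion: v2
name: example
version: "1.0.0"
dependencies:
  - name: aissemble-example-chart
    # file:// paths are resolved relative to this Chart.yaml
    repository: "file://../../../../../../../aissemble/extensions/extensions-helm/aissemble-example-chart"
    version: "1.11.0"  # assumed to match the current aiSSEMBLE version
```

With a local `repository` value like this, running `helm dependency update` against the app chart should resolve uncommitted chart changes from the local checkout instead of waiting for a GitHub Actions build to publish them.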