diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..e238236f5 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + + +version: 2 +updates: + - package-ecosystem: "gomod" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "daily" diff --git a/.github/workflows/bdd.yaml b/.github/workflows/bdd.yaml new file mode 100644 index 000000000..1cbedaa8b --- /dev/null +++ b/.github/workflows/bdd.yaml @@ -0,0 +1,47 @@ +name: BDD tests + +on: + pull_request: + +jobs: + bdd: + runs-on: ubuntu-20.04 + strategy: + matrix: + go-version: + - "1.20" + name: BDD for Go ${{ matrix.go-version}} + steps: + - uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - name: Build project + run: make build + - name: Retrieve Docker compose file + run: wget -O docker-compose.yml https://raw.githubusercontent.com/RedHatInsights/insights-behavioral-spec/main/docker-compose.yml + - name: Retrieve BDD runner + run: wget -O bdd_runner.sh https://raw.githubusercontent.com/RedHatInsights/insights-behavioral-spec/main/run_in_docker.sh && chmod +x bdd_runner.sh + - name: Run BDD + run: ./bdd_runner.sh aggregator-tests . 
+ - name: Set container ID as env + if: always() + run: echo "BDD_CONTAINER=$(docker ps | grep 'insights-behavioral-spec:latest' | cut -d ' ' -f 1)" >> $GITHUB_ENV + - name: Debug + if: always() + run: docker exec -u root "$BDD_CONTAINER" /bin/bash -c "ls -R /insights-behavioral-spec/logs" + - name: Copy logs from container to host + if: always() + run: docker cp "$BDD_CONTAINER:/insights-behavioral-spec/logs" . + - name: Rename logs + if: always() + # Otherwise the upload-artifact action will complain + run: for filename in logs/insights-results-aggregator/*; do mv -n "$filename" "$(echo "$filename" | sed -e 's/["><:]//g')"; done + - name: Store service logs + if: always() + uses: actions/upload-artifact@v3 + with: + name: store-logs + path: | + logs/insights-results-aggregator \ No newline at end of file diff --git a/.github/workflows/dependabot-automerge.yml b/.github/workflows/dependabot-automerge.yml new file mode 100644 index 000000000..10f62e515 --- /dev/null +++ b/.github/workflows/dependabot-automerge.yml @@ -0,0 +1,37 @@ +# Set as automatically merge all the pull requests created by dependabot[bot] +name: Dependabot auto-merge +on: pull_request + +# This section adds write permissions to the secrets.GITHUB_TOKEN. 
Default is just read +permissions: + contents: write + pull-requests: write + +jobs: + dependabot: + runs-on: ubuntu-latest + if: github.actor == 'dependabot[bot]' + steps: + - name: Dependabot metadata + id: metadata + uses: dependabot/fetch-metadata@v1 + with: + github-token: "${{ secrets.GITHUB_TOKEN }}" + - name: Github Actions bot approves the PR + run: gh pr review --approve "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + - name: InsightsDroid approves the PR + run: gh pr review --approve "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.INSIGHTSDROID_TOKEN}} + - name: Enable auto-merge for Dependabot PRs + # We can filter depending on the semver major, minor, or patch updates, + # but let's not do it for now + # if: steps.metadata.outputs.update-type == 'version-update:semver-patch' + run: gh pr merge --auto --merge "$PR_URL" + env: + PR_URL: ${{github.event.pull_request.html_url}} + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} \ No newline at end of file diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index 2ee395ffc..0a4fd14f6 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -30,9 +30,9 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: '1.18' + go-version: '1.20' - name: Generate docgo and literate run: make godoc - name: Build with Jekyll diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index dc37fa791..b5396a31b 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -1,16 +1,12 @@ name: golangci-lint on: - push: - tags: - - v* - branches: - - master - - main pull_request: + permissions: contents: read # Optional: allow read access to pull request. Use with `only-new-issues` option. 
# pull-requests: read + jobs: golangci: name: lint @@ -31,9 +27,9 @@ jobs: args: > --enable=goimports,gosimple,nilerr,prealloc,revive,staticcheck,unconvert,unused,whitespace,zerologlint --timeout=3m - + # Optional: show only new issues if it's a pull request. The default value is `false`. - # only-new-issues: true + only-new-issues: true # Optional: if set to true then the action will use pre-installed Go. # skip-go-installation: true diff --git a/.github/workflows/gotests.yaml b/.github/workflows/gotests.yaml new file mode 100644 index 000000000..0255721c3 --- /dev/null +++ b/.github/workflows/gotests.yaml @@ -0,0 +1,42 @@ +name: Go tests + +on: + pull_request: + +jobs: + gotests: + runs-on: ubuntu-20.04 + strategy: + matrix: + go-version: + - "1.20" + - "1.21" + name: Tests for Go ${{ matrix.go-version}} + services: + postgres: + image: postgres + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: aggregator + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + steps: + - uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - name: Print env vars + run: env + - name: Unit tests + run: make test + - name: Check code coverage + run: ./check_coverage.sh do-not-run-tests + - name: Display code coverage + run: make coverage diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml new file mode 100644 index 000000000..6e84f2be2 --- /dev/null +++ b/.github/workflows/linters.yaml @@ -0,0 +1,44 @@ +name: Go linters + +on: + pull_request: + +jobs: + golint: + runs-on: ubuntu-20.04 + strategy: + matrix: + go-version: + - "1.20" + - "1.21" + name: Linters for Go ${{ matrix.go-version}} + steps: + - uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - name: Build project + run: go build + - name: GO formatting + run: 
make fmt + - name: GO lint + run: make lint + - name: GO vet + run: make vet + - name: gocyclo + run: make cyclo + - name: shellcheck + run: make shellcheck + - name: errcheck + run: make errcheck + - name: goconst checker + run: make goconst + - name: gosec checker + run: make gosec + - name: ineffassign checker + run: make ineffassign + - name: ABC metrics checker + run: make abcgo + - name: JSON checker + run: make json-check \ No newline at end of file diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml new file mode 100644 index 000000000..d7a899250 --- /dev/null +++ b/.github/workflows/shellcheck.yaml @@ -0,0 +1,12 @@ +name: Shell check + +on: + pull_request: + +jobs: + shellcheck: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v3 + - name: Shell check + run: ./shellcheck.sh diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 472c2706c..000000000 --- a/.travis.yml +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2021, 2022, 2023 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -language: go -go: -- "1.20" - -services: - - postgresql - -jobs: - include: - - name: "Build 1.20" - stage: build - script: - - make - - stage: style - script: - - make style - - stage: unit tests - script: - - make test - - ./check_coverage.sh - - make coverage - after_success: - - env - - bash <(curl -s https://codecov.io/bash) - - stage: openapi-checks - services: - - docker - before_install: - - docker pull openapitools/openapi-generator-cli - script: - - docker run -v ${PWD}:/local openapitools/openapi-generator-cli validate -i /local/openapi.json - - stage: bdd tests - services: - - docker - before_script: - - make - - wget -O docker-compose.yml https://raw.githubusercontent.com/RedHatInsights/insights-behavioral-spec/main/docker-compose.yml - - POSTGRES_DB_NAME=test docker-compose --profile test-aggregator up -d - - cid=$(docker ps | grep 'insights-behavioral-spec:latest' | cut -d ' ' -f 1) - - docker cp insights-results-aggregator $cid:`docker exec $cid bash -c 'echo "$VIRTUAL_ENV_BIN"'` - - docker cp openapi.json $cid:`docker exec $cid bash -c 'echo "$HOME"'` - - docker exec -u root $cid /bin/bash -c 'chmod +x $VIRTUAL_ENV_BIN/insights-results-aggregator' - script: - - docker exec -it $cid /bin/bash -c 'env && make aggregator-tests' - - stage: integration tests - script: - - make integration_tests - -stages: - - build - - style - - unit tests - - openapi-checks - - bdd tests - - integration tests diff --git a/Dockerfile b/Dockerfile index 0b349c99e..6f4ab11e8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,22 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM registry.redhat.io/rhel8/go-toolset:1.18.9-8.1675807488 AS builder +FROM registry.redhat.io/ubi9/go-toolset:1.20 AS builder COPY . . 
USER 0 # build the aggregator -RUN umask 0022 && \ - make build && \ - chmod a+x insights-results-aggregator +RUN umask 0022 +ENV GOFLAGS="-buildvcs=false" +RUN make build +RUN chmod a+x insights-results-aggregator -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8-1014 +FROM registry.access.redhat.com/ubi9/ubi-micro:latest COPY --from=builder /opt/app-root/src/insights-results-aggregator . COPY --from=builder /opt/app-root/src/openapi.json /openapi/openapi.json +# copy the certificates from builder image +COPY --from=builder /etc/ssl /etc/ssl +COPY --from=builder /etc/pki /etc/pki + USER 1001 CMD ["/insights-results-aggregator"] diff --git a/Makefile b/Makefile index 75783c92b..1c9f4dccb 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/bash -.PHONY: default clean build fmt lint vet cyclo ineffassign shellcheck errcheck goconst gosec abcgo json-check openapi-check style run test cover integration_tests rest_api_tests sqlite_db license before_commit help godoc install_docgo install_addlicense +.PHONY: default clean build fmt lint vet cyclo ineffassign shellcheck errcheck goconst gosec abcgo json-check openapi-check style run test cover integration_tests rest_api_tests license before_commit help godoc install_docgo install_addlicense SOURCES:=$(shell find . 
-name '*.go') BINARY:=insights-results-aggregator @@ -90,10 +90,6 @@ rest_api_tests: ${BINARY} ## Run REST API tests @echo "Running REST API tests" @./test.sh rest_api -sqlite_db: - mv aggregator.db aggragator.db.backup - local_storage/create_database_sqlite.sh - license: install_addlicense addlicense -c "Red Hat, Inc" -l "apache" -v ./ diff --git a/README.md b/README.md index 48722750b..a0be1acb5 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ Aggregator service for insights results * [Description](#description) * [Documentation](#documentation) * [Makefile targets](#makefile-targets) +* [Usage](#usage) * [BDD tests](#bdd-tests) * [Package manifest](#package-manifest) @@ -32,8 +33,8 @@ Cluster Manager (OCM), Advanced Cluster Manager (ACM), and OCP WebConsole via Insights Operator. That data contain information about clusters status (especially health, security, performance, etc.) and recommendations based on results generated by Insights rules engine. Insights OCP data are consumed from -selected broker, stored in a storage (that basically works as a cache) and -exposed via REST API endpoints. +selected broker, stored in different storages (that basically work as a cache) +so we can expose it via REST API endpoints. 
## Documentation @@ -69,6 +70,26 @@ help Show this help screen function_list List all functions in generated binary file ``` +## Usage + +``` +Usage: + + ./insights-results-aggregator [command] + +The commands are: + + starts aggregator + start-service starts aggregator + help prints help + print-help prints help + print-config prints current configuration set by files & env variables + print-env prints env variables + print-version-info prints version info + migration prints information about migrations (current, latest) + migration migrates database to the specified version +``` + ## BDD tests Behaviour tests for this service are included in [Insights Behavioral @@ -77,7 +98,7 @@ In order to run these tests, the following steps need to be made: 1. clone the [Insights Behavioral Spec](https://github.com/RedHatInsights/insights-behavioral-spec) repository 1. go into the cloned subdirectory `insights-behavioral-spec` -1. run the `aggregator_tests.sh` from this subdirectory +1. run the `insights_results_aggregator_tests.sh` from this subdirectory List of all test scenarios prepared for this service is available at diff --git a/aggregator.go b/aggregator.go index ab87f98a7..6aa354608 100644 --- a/aggregator.go +++ b/aggregator.go @@ -95,7 +95,7 @@ var ( // autoMigrate determines if the prepareDB function upgrades // the database to the latest migration version. This is necessary - // for certain tests that work with a temporary, empty SQLite DB. + // for unit tests that work with an empty DB. autoMigrate = false ) @@ -110,29 +110,45 @@ func fillInInfoParams(params map[string]string) { } // createStorage function initializes connection to preconfigured storage, -// usually SQLite, PostgreSQL, or AWS RDS. 
-func createStorage() (storage.Storage, error) { - storageCfg := conf.GetStorageConfiguration() - redisCfg := conf.GetRedisConfiguration() - // fill-in the missing sub-structure to have the whole Storage - // configuration represented as one data structure - storageCfg.RedisConfiguration = redisCfg - - log.Info().Str("type", storageCfg.Type).Msg("Storage type") - - // try to initialize connection to storage - dbStorage, err := storage.New(storageCfg) - if err != nil { - log.Error().Err(err).Msg("storage.New") - return nil, err +// usually PostgreSQL or AWS RDS. +func createStorage() (storage.OCPRecommendationsStorage, storage.DVORecommendationsStorage, error) { + ocpStorageCfg := conf.GetOCPRecommendationsStorageConfiguration() + // Redis configuration needs to be present in ocpStorageCfg, as the connection is created in the same function + ocpStorageCfg.RedisConfiguration = conf.GetRedisConfiguration() + + dvoStorageCfg := conf.GetDVORecommendationsStorageConfiguration() + + var ocpStorage storage.OCPRecommendationsStorage + var dvoStorage storage.DVORecommendationsStorage + var err error + + // create any storage we have configured + if ocpStorageCfg.Type != "" { + ocpStorage, err = storage.NewOCPRecommendationsStorage(ocpStorageCfg) + if err != nil { + log.Error().Err(err).Msg("storage.NewOCPRecommendationsStorage") + return nil, nil, err + } } - return dbStorage, nil + if dvoStorageCfg.Type != "" { + dvoStorage, err = storage.NewDVORecommendationsStorage(dvoStorageCfg) + if err != nil { + log.Error().Err(err).Msg("storage.NewDVORecommendationsStorage") + return nil, nil, err + } + } + + return ocpStorage, dvoStorage, nil } // closeStorage function closes specified DBStorage with proper error checking // whether the close operation was successful or not. 
func closeStorage(storage storage.Storage) { + if storage == nil { + return + } + err := storage.Close() if err != nil { // TODO: error state might be returned from this function @@ -144,10 +160,23 @@ func closeStorage(storage storage.Storage) { // autoMigrate is set performs migration to the latest schema version // available. func prepareDBMigrations(dbStorage storage.Storage) int { - if conf.GetStorageConfiguration().Type != types.SQLStorage { + driverType := dbStorage.GetDBDriverType() + if driverType != types.DBDriverPostgres { log.Info().Msg("Skipping migration for non-SQL database type") return ExitStatusOK } + + dbConn, dbSchema := dbStorage.GetConnection(), dbStorage.GetDBSchema() + + // ensure DB schema exists + if err := migration.InitDBSchema(dbConn, dbSchema); err != nil { + closeStorage(dbStorage) + log.Error().Err(err).Msg("Unable to initialize DB schema") + return ExitStatusPrepareDbError + } + + log.Debug().Msgf("%v DB schema found", dbSchema) + // This is only used by some unit tests. 
if autoMigrate { if err := dbStorage.MigrateToLatest(); err != nil { @@ -155,17 +184,19 @@ func prepareDBMigrations(dbStorage storage.Storage) int { return ExitStatusPrepareDbError } } else { - currentVersion, err := migration.GetDBVersion(dbStorage.GetConnection()) + currentVersion, err := migration.GetDBVersion(dbStorage.GetConnection(), dbStorage.GetDBSchema()) if err != nil { log.Error().Err(err).Msg("unable to check DB migration version") return ExitStatusPrepareDbError } + log.Debug().Msgf("%v DB schema current migration %v", dbSchema, currentVersion) - maxVersion := migration.GetMaxVersion() + maxVersion := dbStorage.GetMaxVersion() if currentVersion != maxVersion { log.Error().Msgf("old DB migration version (current: %d, latest: %d)", currentVersion, maxVersion) return ExitStatusPrepareDbError } + log.Debug().Msgf("%v DB schema maximum migration %v", dbSchema, maxVersion) } return ExitStatusOK @@ -174,27 +205,47 @@ func prepareDBMigrations(dbStorage storage.Storage) int { // prepareDB function opens a connection to database and loads all available // rule content into it. func prepareDB() int { - dbStorage, err := createStorage() + // TODO: when aggregator supports both storages at once, update the code below + // task to support both storages at once: https://issues.redhat.com/browse/CCXDEV-12316 + + ocpRecommendationsStorage, dvoRecommendationsStorage, err := createStorage() if err != nil { log.Error().Err(err).Msg("Error creating storage") return ExitStatusPrepareDbError } - defer closeStorage(dbStorage) - // Ensure that the DB is at the latest migration version. - if exitCode := prepareDBMigrations(dbStorage); exitCode != ExitStatusOK { - return exitCode - } + if ocpRecommendationsStorage != nil { + log.Debug().Msg("checking OCP storage migrations") + defer closeStorage(ocpRecommendationsStorage) - // Initialize the database. 
- err = dbStorage.Init() - if err != nil { - log.Error().Err(err).Msg("DB initialization error") - return ExitStatusPrepareDbError + // Ensure that the DB is at the latest migration version. + if exitCode := prepareDBMigrations(ocpRecommendationsStorage); exitCode != ExitStatusOK { + return exitCode + } + + // do not initialize last_checked_at map if we're running as dvo-writer + if conf.GetStorageBackendConfiguration().Use != types.DVORecommendationsStorage { + // Initialize the database. + err = ocpRecommendationsStorage.Init() + if err != nil { + log.Error().Err(err).Msg("DB initialization error") + return ExitStatusPrepareDbError + } + } + + // temporarily print some information from DB because of limited access to DB + ocpRecommendationsStorage.PrintRuleDisableDebugInfo() } - // temporarily print some information from DB because of limited access to DB - dbStorage.PrintRuleDisableDebugInfo() + if dvoRecommendationsStorage != nil { + log.Debug().Msg("checking DVO storage migrations") + defer closeStorage(dvoRecommendationsStorage) + + // Ensure that the DB is at the latest migration version. + if exitCode := prepareDBMigrations(dvoRecommendationsStorage); exitCode != ExitStatusOK { + return exitCode + } + } return ExitStatusOK } @@ -214,6 +265,8 @@ func startService() int { return prepDbExitCode } + log.Debug().Msg("DB initialized") + ctx, cancel := context.WithCancel(context.Background()) errorGroup := new(errgroup.Group) @@ -357,15 +410,37 @@ func printEnv() int { // migrations. Non-OK exit code is returned as the last return value in case // of an error. Otherwise, database and connection pointers are returned. 
func getDBForMigrations() (storage.Storage, *sql.DB, int) { - db, err := createStorage() + // use OCP recommendations storage only, unless migrations will be available for other storage(s) too + var db storage.Storage + + ocpStorage, dvoStorage, err := createStorage() if err != nil { log.Error().Err(err).Msg("Unable to prepare DB for migrations") return nil, nil, ExitStatusPrepareDbError } + // migrations are allowed only if a single storage backend is selected + backend := conf.GetStorageBackendConfiguration().Use + switch backend { + case types.OCPRecommendationsStorage: + db = ocpStorage + case types.DVORecommendationsStorage: + db = dvoStorage + default: + log.Error().Msg("storage backend does not support database migrations") + return nil, nil, ExitStatusMigrationError + } + dbConn := db.GetConnection() - if err := migration.InitInfoTable(dbConn); err != nil { + // ensure DB schema is created + if err := migration.InitDBSchema(dbConn, db.GetDBSchema()); err != nil { + closeStorage(db) + log.Error().Err(err).Msg("Unable to initialize DB schema") + return nil, nil, ExitStatusPrepareDbError + } + + if err := migration.InitInfoTable(dbConn, db.GetDBSchema()); err != nil { closeStorage(db) log.Error().Err(err).Msg("Unable to initialize migration info table") return nil, nil, ExitStatusPrepareDbError @@ -376,24 +451,25 @@ func getDBForMigrations() (storage.Storage, *sql.DB, int) { // printMigrationInfo function prints information about current DB migration // version without making any modifications. 
-func printMigrationInfo(dbConn *sql.DB) int { - currMigVer, err := migration.GetDBVersion(dbConn) +func printMigrationInfo(storage storage.Storage, dbConn *sql.DB) int { + currMigVer, err := migration.GetDBVersion(dbConn, storage.GetDBSchema()) if err != nil { log.Error().Err(err).Msg("Unable to get current DB version") return ExitStatusMigrationError } log.Info().Msgf("Current DB version: %d", currMigVer) - log.Info().Msgf("Maximum available version: %d", migration.GetMaxVersion()) + log.Info().Msgf("Maximum available version: %d", storage.GetMaxVersion()) return ExitStatusOK } // setMigrationVersion function attempts to migrate the DB to the target // version. -func setMigrationVersion(dbConn *sql.DB, dbDriver types.DBDriver, versStr string) int { +func setMigrationVersion(db storage.Storage, dbConn *sql.DB, versStr string) int { var targetVersion migration.Version + if versStrLower := strings.ToLower(versStr); versStrLower == "latest" || versStrLower == "max" { - targetVersion = migration.GetMaxVersion() + targetVersion = db.GetMaxVersion() } else { vers, err := strconv.Atoi(versStr) if err != nil { @@ -404,7 +480,7 @@ func setMigrationVersion(dbConn *sql.DB, dbDriver types.DBDriver, versStr string targetVersion = migration.Version(vers) } - if err := migration.SetDBVersion(dbConn, dbDriver, targetVersion); err != nil { + if err := migration.SetDBVersion(dbConn, db.GetDBDriverType(), db.GetDBSchema(), targetVersion, db.GetMigrations()); err != nil { log.Error().Err(err).Msg("Unable to perform migration") return ExitStatusMigrationError } @@ -427,10 +503,10 @@ func performMigrations() int { switch len(migrationArgs) { case 0: - return printMigrationInfo(dbConn) + return printMigrationInfo(db, dbConn) case 1: - return setMigrationVersion(dbConn, db.GetDBDriverType(), migrationArgs[0]) + return setMigrationVersion(db, dbConn, migrationArgs[0]) default: log.Error().Msg("Unexpected number of arguments to migrations command (expected 0-1)") diff --git 
a/aggregator_test.go b/aggregator_test.go index 86c7d94a4..361706401 100644 --- a/aggregator_test.go +++ b/aggregator_test.go @@ -65,7 +65,7 @@ func TestCreateStorage(t *testing.T) { os.Clearenv() mustLoadConfiguration("tests/config1") - _, err := main.CreateStorage() + _, _, err := main.CreateStorage() helpers.FailOnError(t, err) } @@ -76,11 +76,56 @@ func TestStartService(t *testing.T) { helpers.RunTestWithTimeout(t, func(t testing.TB) { os.Clearenv() + mustLoadConfiguration("./tests/tests") + + go func() { + main.StartService() + }() + + errCode := main.StopService() + assert.Equal(t, main.ExitStatusOK, errCode) + }, testsTimeout) + + *main.AutoMigratePtr = false +} + +func TestStartServiceDVOStorage(t *testing.T) { + // It is necessary to perform migrations for this test + // because the service won't run on top of an empty DB. + *main.AutoMigratePtr = true + + helpers.RunTestWithTimeout(t, func(t testing.TB) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") + + setEnvSettings(t, map[string]string{ + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "dvo_recommendations", + "INSIGHTS_RESULTS_AGGREGATOR__METRICS__NAMESPACE": "dvo_writer", + }) + + go func() { + main.StartService() + }() + errCode := main.StopService() + assert.Equal(t, main.ExitStatusOK, errCode) + }, testsTimeout) + + *main.AutoMigratePtr = false +} + +// TestStartServiceBothStorages tests aggregator service config (no backend to use specified == use both) +func TestStartServiceBothStorages(t *testing.T) { + // It is necessary to perform migrations for this test + // because the service won't run on top of an empty DB. 
+ *main.AutoMigratePtr = true + + helpers.RunTestWithTimeout(t, func(t testing.TB) { + os.Clearenv() mustLoadConfiguration("./tests/tests") + setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "", }) go func() { @@ -96,27 +141,32 @@ func TestStartService(t *testing.T) { func TestStartService_DBError(t *testing.T) { helpers.RunTestWithTimeout(t, func(t testing.TB) { + os.Clearenv() + buf := new(bytes.Buffer) log.Logger = zerolog.New(buf) setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": "/non/existing/path", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER": "sqlite3", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE": "sql", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "ocp_recommendations", }) exitCode := main.StartService() assert.Equal(t, main.ExitStatusPrepareDbError, exitCode) - assert.Contains(t, buf.String(), "unable to open database file: no such file or directory") + assert.Contains(t, buf.String(), "driver sqlite3 is not supported") }, testsTimeout) } func TestCreateStorage_BadDriver(t *testing.T) { + os.Clearenv() setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "non-existing-driver", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": "/non/existing/path", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE": "sql", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "ocp_recommendations", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER": "non-existing-driver", }) - _, err := main.CreateStorage() + _, _, err := main.CreateStorage() assert.EqualError(t, err, "driver non-existing-driver is not supported") } @@ -129,28 +179,19 @@ func 
TestCloseStorage_Error(t *testing.T) { mockStorage, expects := ira_helpers.MustGetMockStorageWithExpects(t) expects.ExpectClose().WillReturnError(fmt.Errorf(errStr)) - main.CloseStorage(mockStorage.(*storage.DBStorage)) + main.CloseStorage(mockStorage.(*storage.OCPRecommendationsDBStorage)) assert.Contains(t, buf.String(), errStr) } -func TestPrepareDB_DBError(t *testing.T) { - setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "non-existing-driver", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": "/non/existing/path", - }) - +func TestPrepareDB_DBErrorCode(t *testing.T) { errCode := main.PrepareDB() assert.Equal(t, main.ExitStatusPrepareDbError, errCode) } func TestPrepareDB(t *testing.T) { - setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", - - "INSIGHTS_RESULTS_AGGREGATOR__CONTENT__PATH": "./tests/content/ok/", - }) + os.Clearenv() + mustLoadConfiguration("./tests/tests") *main.AutoMigratePtr = true @@ -160,59 +201,40 @@ func TestPrepareDB(t *testing.T) { *main.AutoMigratePtr = false } -func TestPrepareDB_NoRulesDirectory(t *testing.T) { - setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", - - "INSIGHTS_RESULTS_AGGREGATOR__CONTENT__PATH": "/non-existing-path", - }) - - errCode := main.PrepareDB() - assert.Equal(t, main.ExitStatusPrepareDbError, errCode) -} - -func TestPrepareDB_BadRules(t *testing.T) { +func TestStartConsumer_BadBrokerAddress(t *testing.T) { setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", - - "INSIGHTS_RESULTS_AGGREGATOR__CONTENT__PATH": "./tests/content/bad_metadata_status/", - }) - - errCode := main.PrepareDB() - 
assert.Equal(t, main.ExitStatusPrepareDbError, errCode) -} + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER": "postgres", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE": "sql", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "ocp_recommendations", -func TestStartConsumer_DBError(t *testing.T) { - setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "non-existing-driver", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": "bad-data-source", + "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES": "non-existing-host:999999", + "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED": "true", }) err := main.StartConsumer(conf.GetBrokerConfiguration()) - assert.EqualError(t, err, "driver non-existing-driver is not supported") + assert.EqualError( + t, err, "kafka: client has run out of available brokers to talk to (Is your cluster reachable?)", + ) } -func TestStartConsumer_BadBrokerAddress(t *testing.T) { +func TestStartConsumer_BadBackendStorage(t *testing.T) { setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", - - "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESS": "non-existing-host:999999", - "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED": "true", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER": "postgres", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE": "sql", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "what a terrible failure", }) err := main.StartConsumer(conf.GetBrokerConfiguration()) assert.EqualError( - t, err, "kafka: client has run out of available brokers to talk to (Is your cluster reachable?)", + t, err, "no backend storage or incompatible selected", ) } func TestStartServer_DBError(t *testing.T) { setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": 
"non-existing-driver", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": "bad-data-source", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER": "non-existing-driver", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE": "sql", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "ocp_recommendations", }) err := main.StartServer() @@ -221,8 +243,9 @@ func TestStartServer_DBError(t *testing.T) { func TestStartServer_BadServerAddress(t *testing.T) { setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER": "postgres", + "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE": "sql", + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "ocp_recommendations", "INSIGHTS_RESULTS_AGGREGATOR__SERVER__ADDRESS": "localhost:999999", "INSIGHTS_RESULTS_AGGREGATOR__SERVER__API_SPEC_FILE": "openapi.json", @@ -233,17 +256,14 @@ func TestStartServer_BadServerAddress(t *testing.T) { } func TestStartService_BadBrokerAndServerAddress(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") setEnvSettings(t, map[string]string{ - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER": "sqlite3", - "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE": ":memory:", - - "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESS": "non-existing-host:1", - "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED": "true", + "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES": "non-existing-host:1", + "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED": "true", "INSIGHTS_RESULTS_AGGREGATOR__SERVER__ADDRESS": "non-existing-host:1", "INSIGHTS_RESULTS_AGGREGATOR__SERVER__API_SPEC_FILE": "openapi.json", - - "INSIGHTS_RESULTS_AGGREGATOR__CONTENT__PATH": "./tests/content/ok/", }) *main.AutoMigratePtr = true @@ -277,6 +297,8 @@ func TestPrintEnv(t *testing.T) { // 
TestGetDBForMigrations checks that the function ensures the existence of // the migration_info table and that the SQL DB connection works correctly. func TestGetDBForMigrations(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, main.ExitStatusOK, exitCode) defer ira_helpers.MustCloseStorage(t, db) @@ -287,38 +309,62 @@ func TestGetDBForMigrations(t *testing.T) { assert.NoError(t, err, "unable to read version from migration info table") } +func TestGetDBForMigrationsDVO(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") + + setEnvSettings(t, map[string]string{ + "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE": "dvo_recommendations", + }) + + db, dbConn, exitCode := main.GetDBForMigrations() + assert.Equal(t, main.ExitStatusOK, exitCode) + defer ira_helpers.MustCloseStorage(t, db) + + row := dbConn.QueryRow("SELECT version FROM dvo.migration_info;") + var version migration.Version + err := row.Scan(&version) + assert.NoError(t, err, "unable to read version from migration info table") +} + // TestPrintMigrationInfo checks that printing migration info exits with OK code. func TestPrintMigrationInfo(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, exitCode, main.ExitStatusOK) defer ira_helpers.MustCloseStorage(t, db) - exitCode = main.PrintMigrationInfo(dbConn) + exitCode = main.PrintMigrationInfo(db, dbConn) assert.Equal(t, main.ExitStatusOK, exitCode) } // TestPrintMigrationInfoClosedDB checks that printing migration info with // a closed DB connection results in a migration error exit code. func TestPrintMigrationInfoClosedDB(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, exitCode, main.ExitStatusOK) // Close DB connection immediately. 
ira_helpers.MustCloseStorage(t, db) - exitCode = main.PrintMigrationInfo(dbConn) + exitCode = main.PrintMigrationInfo(db, dbConn) assert.Equal(t, main.ExitStatusMigrationError, exitCode) } // TestSetMigrationVersionZero checks that it is possible to set migration version to 0. func TestSetMigrationVersionZero(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, exitCode, main.ExitStatusOK) defer ira_helpers.MustCloseStorage(t, db) - exitCode = main.SetMigrationVersion(dbConn, db.GetDBDriverType(), "0") + exitCode = main.SetMigrationVersion(db, dbConn, "0") assert.Equal(t, main.ExitStatusOK, exitCode) - version, err := migration.GetDBVersion(dbConn) + version, err := migration.GetDBVersion(dbConn, db.GetDBSchema()) assert.NoError(t, err, "unable to get migration version") assert.Equal(t, migration.Version(0), version) @@ -326,46 +372,54 @@ func TestSetMigrationVersionZero(t *testing.T) { // TestSetMigrationVersionZero checks that it is to upgrade DB to the latest migration. func TestSetMigrationVersionLatest(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, exitCode, main.ExitStatusOK) defer ira_helpers.MustCloseStorage(t, db) - exitCode = main.SetMigrationVersion(dbConn, db.GetDBDriverType(), "latest") + exitCode = main.SetMigrationVersion(db, dbConn, "latest") assert.Equal(t, main.ExitStatusOK, exitCode) - version, err := migration.GetDBVersion(dbConn) + version, err := migration.GetDBVersion(dbConn, db.GetDBSchema()) assert.NoError(t, err, "unable to get migration version") - assert.Equal(t, migration.GetMaxVersion(), version) + assert.Equal(t, db.GetMaxVersion(), version) } // TestSetMigrationVersionClosedDB checks that setting the migration version // with a closed DB connection results in a migration error exit code. 
func TestSetMigrationVersionClosedDB(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, exitCode, main.ExitStatusOK) // Close DB connection immediately. ira_helpers.MustCloseStorage(t, db) - exitCode = main.SetMigrationVersion(dbConn, db.GetDBDriverType(), "0") + exitCode = main.SetMigrationVersion(db, dbConn, "0") assert.Equal(t, main.ExitStatusMigrationError, exitCode) } // TestSetMigrationVersionInvalid checks that when supplied an invalid version // argument, the set version function exits with a migration error code. func TestSetMigrationVersionInvalid(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") db, dbConn, exitCode := main.GetDBForMigrations() assert.Equal(t, exitCode, main.ExitStatusOK) // Close DB connection immediately. ira_helpers.MustCloseStorage(t, db) - exitCode = main.SetMigrationVersion(dbConn, db.GetDBDriverType(), "") + exitCode = main.SetMigrationVersion(db, dbConn, "") assert.Equal(t, main.ExitStatusMigrationError, exitCode) } // TestPerformMigrationsPrint checks that the command for // printing migration info exits with the OK exit code. func TestPerformMigrationsPrint(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") oldArgs := os.Args os.Args = []string{os.Args[0], "migrations"} @@ -378,6 +432,9 @@ func TestPerformMigrationsPrint(t *testing.T) { // TestPerformMigrationsPrint checks that the command for // setting migration version exits with the OK exit code. func TestPerformMigrationsSet(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") + oldArgs := os.Args os.Args = []string{os.Args[0], "migrations", "0"} @@ -390,6 +447,9 @@ func TestPerformMigrationsSet(t *testing.T) { // TestPerformMigrationsPrint checks that supplying too many arguments // to the migration sub-commands results in the migration error exit code. 
func TestPerformMigrationsTooManyArgs(t *testing.T) { + os.Clearenv() + mustLoadConfiguration("./tests/tests") + oldArgs := os.Args os.Args = []string{os.Args[0], "migrations", "hello", "world"} @@ -407,7 +467,7 @@ func TestFillInInfoParams(t *testing.T) { // preliminary test if Go Universe is still ok assert.Empty(t, m, "Map should be empty at the beginning") - // try to fill-in all info params + // try to fill in all info params main.FillInInfoParams(m) // preliminary test if Go Universe is still ok diff --git a/broker/configuration.go b/broker/configuration.go index 6ee499a13..794ecb2cc 100644 --- a/broker/configuration.go +++ b/broker/configuration.go @@ -19,6 +19,7 @@ limitations under the License. package broker import ( + "crypto/sha512" "strings" "time" @@ -30,7 +31,7 @@ import ( // Configuration represents configuration of Kafka broker type Configuration struct { - Address string `mapstructure:"address" toml:"address"` + Addresses string `mapstructure:"addresses" toml:"addresses"` SecurityProtocol string `mapstructure:"security_protocol" toml:"security_protocol"` CertPath string `mapstructure:"cert_path" toml:"cert_path"` SaslMechanism string `mapstructure:"sasl_mechanism" toml:"sasl_mechanism"` @@ -63,20 +64,28 @@ func SaramaConfigFromBrokerConfig(cfg Configuration) (*sarama.Config, error) { if strings.Contains(cfg.SecurityProtocol, "SSL") { saramaConfig.Net.TLS.Enable = true } - if cfg.CertPath != "" { + + if strings.EqualFold(cfg.SecurityProtocol, "SSL") && cfg.CertPath != "" { tlsConfig, err := tlsutils.NewTLSConfig(cfg.CertPath) if err != nil { log.Error().Msgf("Unable to load TLS config for %s cert", cfg.CertPath) return nil, err } saramaConfig.Net.TLS.Config = tlsConfig - } - if strings.HasPrefix(cfg.SecurityProtocol, "SASL_") { + } else if strings.HasPrefix(cfg.SecurityProtocol, "SASL_") { log.Info().Msg("Configuring SASL authentication") saramaConfig.Net.SASL.Enable = true saramaConfig.Net.SASL.User = cfg.SaslUsername 
saramaConfig.Net.SASL.Password = cfg.SaslPassword saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(cfg.SaslMechanism) + + if strings.EqualFold(cfg.SaslMechanism, sarama.SASLTypeSCRAMSHA512) { + log.Info().Msg("Configuring SCRAM-SHA512") + saramaConfig.Net.SASL.Handshake = true + saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &SCRAMClient{HashGeneratorFcn: sha512.New} + } + } } // ClientID is fully optional, but by setting it, we can get rid of some warning messages in logs diff --git a/broker/configuration_test.go b/broker/configuration_test.go index ff5f63c4b..2f7fd4f7f 100644 --- a/broker/configuration_test.go +++ b/broker/configuration_test.go @@ -67,11 +67,23 @@ func TestSaramaConfigFromBrokerConfig(t *testing.T) { assert.Equal(t, "username", saramaConfig.Net.SASL.User) assert.Equal(t, "password", saramaConfig.Net.SASL.Password) assert.Equal(t, "foobarbaz", saramaConfig.ClientID) + + cfg.SaslMechanism = "SCRAM-SHA-512" + saramaConfig, err = broker.SaramaConfigFromBrokerConfig(cfg) + helpers.FailOnError(t, err) + assert.Equal(t, sarama.V0_10_2_0, saramaConfig.Version) + assert.True(t, saramaConfig.Net.TLS.Enable) + assert.True(t, saramaConfig.Net.SASL.Enable) + assert.Equal(t, sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512), saramaConfig.Net.SASL.Mechanism) + assert.Equal(t, "username", saramaConfig.Net.SASL.User) + assert.Equal(t, "password", saramaConfig.Net.SASL.Password) + assert.Equal(t, "foobarbaz", saramaConfig.ClientID) } func TestBadConfiguration(t *testing.T) { cfg := broker.Configuration{ - CertPath: "missing_path", + SecurityProtocol: "SSL", + CertPath: "missing_path", } saramaCfg, err := broker.SaramaConfigFromBrokerConfig(cfg) diff --git a/broker/scram_client.go b/broker/scram_client.go new file mode 100644 index 000000000..c12b1eafe --- /dev/null +++ b/broker/scram_client.go @@ -0,0 +1,32 @@ +package broker + +import "github.com/xdg/scram" + +// SCRAMClient implementation for the SCRAM authentication 
+type SCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +// Begin prepares the client for the SCRAM exchange +func (x *SCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +// Step steps client through the SCRAM exchange +func (x *SCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +// Done should return true when the SCRAM conversation +// is over. +func (x *SCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/build.sh b/build.sh index 343feae29..0fdeb76e4 100755 --- a/build.sh +++ b/build.sh @@ -14,7 +14,7 @@ # limitations under the License. # retrieve the latest tag set in repository -version=$(git describe --tags --abbrev=0) +version=$(git describe --always --tags --abbrev=0) buildtime=$(date) branch=$(git rev-parse --abbrev-ref HEAD) diff --git a/check_coverage.sh b/check_coverage.sh index 35500721b..c4822bfbc 100755 --- a/check_coverage.sh +++ b/check_coverage.sh @@ -14,7 +14,7 @@ # limitations under the License. 
-THRESHOLD=${COV_THRESHOLD:=78} +THRESHOLD=${COV_THRESHOLD:=60} RED_BG=$(tput setab 1) GREEN_BG=$(tput setab 2) diff --git a/conf/configuration.go b/conf/configuration.go index 76c707077..0edd23d77 100644 --- a/conf/configuration.go +++ b/conf/configuration.go @@ -56,6 +56,7 @@ import ( const ( configFileEnvVariableName = "INSIGHTS_RESULTS_AGGREGATOR_CONFIG_FILE" defaultOrgAllowlistFileName = "org_allowlist.csv" + defaultStorageBackend = "ocp_recommendations" noBrokerConfig = "warning: no broker configurations found in clowder config" noSaslConfig = "warning: SASL configuration is missing" @@ -68,6 +69,11 @@ type MetricsConfiguration struct { Namespace string `mapstructure:"namespace" toml:"namespace"` } +// StorageBackend contains global storage backend configuration +type StorageBackend struct { + Use string `mapstructure:"use" toml:"use"` +} + // ConfigStruct is a structure holding the whole service configuration type ConfigStruct struct { Broker broker.Configuration `mapstructure:"broker" toml:"broker"` @@ -75,13 +81,15 @@ type ConfigStruct struct { Processing struct { OrgAllowlistFile string `mapstructure:"org_allowlist_file" toml:"org_allowlist_file"` } `mapstructure:"processing"` - Storage storage.Configuration `mapstructure:"storage" toml:"storage"` - Logging logger.LoggingConfiguration `mapstructure:"logging" toml:"logging"` - CloudWatch logger.CloudWatchConfiguration `mapstructure:"cloudwatch" toml:"cloudwatch"` - Redis storage.RedisConfiguration `mapstructure:"redis" toml:"redis"` - Metrics MetricsConfiguration `mapstructure:"metrics" toml:"metrics"` - SentryLoggingConf logger.SentryLoggingConfiguration `mapstructure:"sentry" toml:"sentry"` - KafkaZerologConf logger.KafkaZerologConfiguration `mapstructure:"kafka_zerolog" toml:"kafka_zerolog"` + OCPRecommendationsStorage storage.Configuration `mapstructure:"ocp_recommendations_storage" toml:"ocp_recommendations_storage"` + DVORecommendationsStorage storage.Configuration 
`mapstructure:"dvo_recommendations_storage" toml:"dvo_recommendations_storage"` + StorageBackend StorageBackend `mapstructure:"storage_backend" toml:"storage_backend"` + Logging logger.LoggingConfiguration `mapstructure:"logging" toml:"logging"` + CloudWatch logger.CloudWatchConfiguration `mapstructure:"cloudwatch" toml:"cloudwatch"` + Redis storage.RedisConfiguration `mapstructure:"redis" toml:"redis"` + Metrics MetricsConfiguration `mapstructure:"metrics" toml:"metrics"` + SentryLoggingConf logger.SentryLoggingConfiguration `mapstructure:"sentry" toml:"sentry"` + KafkaZerologConf logger.KafkaZerologConfiguration `mapstructure:"kafka_zerolog" toml:"kafka_zerolog"` } // Config has exactly the same structure as *.toml file @@ -179,9 +187,21 @@ func getOrganizationAllowlist() mapset.Set { return allowlist } -// GetStorageConfiguration returns storage configuration -func GetStorageConfiguration() storage.Configuration { - return Config.Storage +// GetStorageBackendConfiguration returns storage backend configuration +func GetStorageBackendConfiguration() StorageBackend { + return Config.StorageBackend +} + +// GetOCPRecommendationsStorageConfiguration returns storage configuration for +// OCP recommendations database +func GetOCPRecommendationsStorageConfiguration() storage.Configuration { + return Config.OCPRecommendationsStorage +} + +// GetDVORecommendationsStorageConfiguration returns storage configuration for +// DVO recommendations database +func GetDVORecommendationsStorageConfiguration() storage.Configuration { + return Config.DVORecommendationsStorage } // GetRedisConfiguration returns Redis storage configuration @@ -271,6 +291,42 @@ func loadAllowlistFromCSV(r io.Reader) (mapset.Set, error) { return allowlist, nil } +func updateBrokerCfgFromClowder(configuration *ConfigStruct) { + // make sure broker(s) are configured in Clowder + if len(clowder.LoadedConfig.Kafka.Brokers) > 0 { + configuration.Broker.Addresses = "" + for _, broker := range 
clowder.LoadedConfig.Kafka.Brokers { + if broker.Port != nil { + configuration.Broker.Addresses += fmt.Sprintf("%s:%d", broker.Hostname, *broker.Port) + "," + } else { + configuration.Broker.Addresses += broker.Hostname + "," + } + } + // remove the extra comma + configuration.Broker.Addresses = configuration.Broker.Addresses[:len(configuration.Broker.Addresses)-1] + + // SSL config + clowderBrokerCfg := clowder.LoadedConfig.Kafka.Brokers[0] + if clowderBrokerCfg.Authtype != nil { + fmt.Println("kafka is configured to use authentication") + if clowderBrokerCfg.Sasl != nil { + configuration.Broker.SaslUsername = *clowderBrokerCfg.Sasl.Username + configuration.Broker.SaslPassword = *clowderBrokerCfg.Sasl.Password + configuration.Broker.SaslMechanism = *clowderBrokerCfg.Sasl.SaslMechanism + configuration.Broker.SecurityProtocol = *clowderBrokerCfg.SecurityProtocol + if caPath, err := clowder.LoadedConfig.KafkaCa(clowderBrokerCfg); err == nil { + configuration.Broker.CertPath = caPath + } + } else { + fmt.Println(noSaslConfig) + } + } + } else { + fmt.Println(noBrokerConfig) + } + updateTopicsMapping(configuration) +} + // updateConfigFromClowder updates the current config with the values defined in clowder func updateConfigFromClowder(c *ConfigStruct) error { if !clowder.IsClowderEnabled() { @@ -283,45 +339,14 @@ func updateConfigFromClowder(c *ConfigStruct) error { if clowder.LoadedConfig.Kafka == nil { fmt.Println("No Kafka configuration available in Clowder, using default one") } else { - if len(clowder.LoadedConfig.Kafka.Brokers) > 0 { - broker := clowder.LoadedConfig.Kafka.Brokers[0] - // port can be empty in clowder, so taking it into account - if broker.Port != nil { - c.Broker.Address = fmt.Sprintf("%s:%d", broker.Hostname, *broker.Port) - } else { - c.Broker.Address = broker.Hostname - } - - // SSL config - if broker.Authtype != nil { - if broker.Sasl != nil { - c.Broker.SaslUsername = *broker.Sasl.Username - c.Broker.SaslPassword = *broker.Sasl.Password - 
c.Broker.SaslMechanism = *broker.Sasl.SaslMechanism - c.Broker.SecurityProtocol = *broker.Sasl.SecurityProtocol - if caPath, err := clowder.LoadedConfig.KafkaCa(broker); err == nil { - c.Broker.CertPath = caPath - } - } else { - fmt.Println(noSaslConfig) - } - } - } else { - fmt.Println(noBrokerConfig) - } - - if err := updateTopicsMapping(c); err != nil { - fmt.Println("warning: an error occurred when applying new topics") - } + updateBrokerCfgFromClowder(c) } + // get DB configuration from clowder if clowder.LoadedConfig.Database != nil { - // get DB configuration from clowder - c.Storage.PGDBName = clowder.LoadedConfig.Database.Name - c.Storage.PGHost = clowder.LoadedConfig.Database.Hostname - c.Storage.PGPort = clowder.LoadedConfig.Database.Port - c.Storage.PGUsername = clowder.LoadedConfig.Database.Username - c.Storage.PGPassword = clowder.LoadedConfig.Database.Password + // we're currently using the same storage in all Clowderized envs (stage/prod) + updateStorageConfFromClowder(&c.OCPRecommendationsStorage) + updateStorageConfFromClowder(&c.DVORecommendationsStorage) } else { fmt.Println(noStorage) } @@ -329,7 +354,15 @@ func updateConfigFromClowder(c *ConfigStruct) error { return nil } -func updateTopicsMapping(c *ConfigStruct) error { +func updateStorageConfFromClowder(conf *storage.Configuration) { + conf.PGDBName = clowder.LoadedConfig.Database.Name + conf.PGHost = clowder.LoadedConfig.Database.Hostname + conf.PGPort = clowder.LoadedConfig.Database.Port + conf.PGUsername = clowder.LoadedConfig.Database.Username + conf.PGPassword = clowder.LoadedConfig.Database.Password +} + +func updateTopicsMapping(c *ConfigStruct) { // Updating topics from clowder mapping if available if topicCfg, ok := clowder.KafkaTopics[c.Broker.Topic]; ok { c.Broker.Topic = topicCfg.Name @@ -348,6 +381,4 @@ func updateTopicsMapping(c *ConfigStruct) error { } else { fmt.Printf(noTopicMapping, c.Broker.PayloadTrackerTopic) } - - return nil } diff --git a/conf/configuration_test.go 
b/conf/configuration_test.go index c5e4cb87e..1ec939c3c 100644 --- a/conf/configuration_test.go +++ b/conf/configuration_test.go @@ -55,7 +55,7 @@ func removeFile(t *testing.T, filename string) { func setEnvVariables(t *testing.T) { os.Clearenv() - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESS", "localhost:9093") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES", "localhost:9093") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__BROKER__TOPIC", "platform.results.ccx") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__BROKER__GROUP", "aggregator") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED", "true") @@ -67,23 +67,33 @@ func setEnvVariables(t *testing.T) { mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__PROCESSING__ORG_ALLOWLIST", "org_allowlist.csv") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER", "sqlite3") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__SQLITE_DATASOURCE", ":memory:") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_USERNAME", "user") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PASSWORD", "password") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_HOST", "localhost") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PORT", "5432") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_DB_NAME", "aggregator") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PARAMS", "params") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__LOG_SQL_QUERIES", "true") - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__TYPE", "sql") - - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__CONTENT__PATH", "/rules-content") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER", "postgres") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_USERNAME", "user") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PASSWORD", "password") + mustSetEnv(t, 
"INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_HOST", "localhost") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PORT", "5432") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_DB_NAME", "aggregator") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PARAMS", "params") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__LOG_SQL_QUERIES", "true") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE", "sql") + + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__DB_DRIVER", "postgres") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_USERNAME", "user") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PASSWORD", "password") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_HOST", "localhost") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PORT", "5432") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_DB_NAME", "aggregator") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PARAMS", "params") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__LOG_SQL_QUERIES", "true") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__TYPE", "sql") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__REDIS__ENDPOINT", "default-redis-endpoint") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__REDIS__DATABASE", "42") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__REDIS__TIMEOUT_SECONDS", "0") mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__REDIS__PASSWORD", "top secret") + + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__SENTRY__DSN", "test.example.com") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__SENTRY__ENVIRONMENT", "test") } func mustSetEnv(t *testing.T, key, val string) { @@ -142,7 +152,7 @@ func 
TestLoadBrokerConfiguration(t *testing.T) { brokerCfg := conf.GetBrokerConfiguration() - assert.Equal(t, "localhost:29092", brokerCfg.Address) + assert.Equal(t, "localhost:29092", brokerCfg.Addresses) assert.Equal(t, "platform.results.ccx", brokerCfg.Topic) assert.Equal(t, "aggregator", brokerCfg.Group) assert.Equal(t, expectedTimeout, brokerCfg.Timeout) @@ -158,18 +168,60 @@ func TestLoadServerConfiguration(t *testing.T) { assert.Equal(t, "/api/v1/", serverCfg.APIPrefix) } -// TestLoadStorageConfiguration tests loading the storage configuration sub-tree -func TestLoadStorageConfiguration(t *testing.T) { +// TestLoadStorageBackendConfigurationChangedFromEnvVar tests loading the +// storage backend configuration subtree +func TestLoadStorageBackendConfigurationChangedFromEnvVar(t *testing.T) { + os.Clearenv() + + const configPath = "../tests/config1" + + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE", "dvo_recommendations") + + mustLoadConfiguration(configPath) + + storageCfg := conf.GetStorageBackendConfiguration() + assert.Equal(t, "dvo_recommendations", storageCfg.Use) +} + +// TestLoadStorageBackendConfigurationChangedFromEnvVar tests loading the +// storage backend configuration subtree doesn't change from empty +func TestLoadStorageBackendConfigurationNotChangedWhenEmpty(t *testing.T) { + os.Clearenv() + + const configPath = "../tests/config1" + + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE", "") + mustLoadConfiguration(configPath) + + storageCfg := conf.GetStorageBackendConfiguration() + assert.Equal(t, "", storageCfg.Use) +} + +// TestLoadOCPRecommendationsStorageConfiguration tests loading the OCP +// recommendations storage configuration subtree +func TestLoadOCPRecommendationsStorageConfiguration(t *testing.T) { + TestLoadConfiguration(t) + + storageCfg := conf.GetOCPRecommendationsStorageConfiguration() + + assert.Equal(t, "postgres", storageCfg.Driver) + assert.Equal(t, "sql", storageCfg.Type) +} + +// 
TestLoadDVORecommendationsStorageConfiguration tests loading the DVO +// recommendations storage configuration subtree +func TestLoadDVORecommendationsStorageConfiguration(t *testing.T) { TestLoadConfiguration(t) - storageCfg := conf.GetStorageConfiguration() + storageCfg := conf.GetDVORecommendationsStorageConfiguration() - assert.Equal(t, "sqlite3", storageCfg.Driver) - assert.Equal(t, ":memory:", storageCfg.SQLiteDataSource) + assert.Equal(t, "postgres", storageCfg.Driver) + assert.Equal(t, "user", storageCfg.PGUsername) + assert.Equal(t, "password", storageCfg.PGPassword) assert.Equal(t, "sql", storageCfg.Type) } -// TestLoadRedisConfiguration tests loading the Redis configuration sub-tree +// TestLoadRedisConfiguration tests loading the Redis configuration subtree func TestLoadRedisConfiguration(t *testing.T) { TestLoadConfiguration(t) @@ -181,47 +233,81 @@ func TestLoadRedisConfiguration(t *testing.T) { assert.Equal(t, "", redisCfg.RedisPassword) } -// TestLoadConfigurationOverrideFromEnv tests overriding configuration by env variables -func TestLoadConfigurationOverrideFromEnv(t *testing.T) { +// TestLoadConfigurationOverrideFromEnv1 tests overriding configuration by env variables +func TestLoadConfigurationOverrideFromEnv1(t *testing.T) { os.Clearenv() const configPath = "../tests/config1" mustLoadConfiguration(configPath) - storageCfg := conf.GetStorageConfiguration() + storageCfg := conf.GetOCPRecommendationsStorageConfiguration() assert.Equal(t, storage.Configuration{ - Driver: "sqlite3", - SQLiteDataSource: ":memory:", - PGUsername: "user", - PGPassword: "password", - PGHost: "localhost", - PGPort: 5432, - PGDBName: "aggregator", - PGParams: "", - Type: "sql", + Driver: "postgres", + PGUsername: "user", + PGPassword: "password", + PGHost: "localhost", + PGPort: 5432, + PGDBName: "aggregator", + PGParams: "", + Type: "sql", }, storageCfg) - mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER", "postgres") - mustSetEnv(t, 
"INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PASSWORD", "some very secret password") + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PASSWORD", "some very secret password") mustLoadConfiguration(configPath) - storageCfg = conf.GetStorageConfiguration() + storageCfg = conf.GetOCPRecommendationsStorageConfiguration() assert.Equal(t, storage.Configuration{ - Driver: "postgres", - SQLiteDataSource: ":memory:", - PGUsername: "user", - PGPassword: "some very secret password", - PGHost: "localhost", - PGPort: 5432, - PGDBName: "aggregator", - PGParams: "", - Type: "sql", + Driver: "postgres", + PGUsername: "user", + PGPassword: "some very secret password", + PGHost: "localhost", + PGPort: 5432, + PGDBName: "aggregator", + PGParams: "", + Type: "sql", }, storageCfg) } -// TestLoadOrganizationAllowlist tests if the allowlist CSV file gets loaded properly +// TestLoadConfigurationOverrideFromEnv2 tests overriding configuration by env variables +func TestLoadConfigurationOverrideFromEnv2(t *testing.T) { + os.Clearenv() + + const configPath = "../tests/config1" + + mustLoadConfiguration(configPath) + + storageCfg := conf.GetDVORecommendationsStorageConfiguration() + assert.Equal(t, storage.Configuration{ + Driver: "postgres", + PGUsername: "user", + PGPassword: "password", + PGHost: "localhost", + PGPort: 5432, + PGDBName: "aggregator", + PGParams: "", + Type: "sql", + }, storageCfg) + + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PASSWORD", "some very secret password") + + mustLoadConfiguration(configPath) + + storageCfg = conf.GetDVORecommendationsStorageConfiguration() + assert.Equal(t, storage.Configuration{ + Driver: "postgres", + PGUsername: "user", + PGPassword: "some very secret password", + PGHost: "localhost", + PGPort: 5432, + PGDBName: "aggregator", + PGParams: "", + Type: "sql", + }, storageCfg) +} + +// TestLoadOrganizationAllowlist tests if the allow-list CSV file gets loaded properly func 
TestLoadOrganizationAllowlist(t *testing.T) { expectedAllowlist := mapset.NewSetWith( types.OrgID(1), @@ -264,27 +350,23 @@ str func TestLoadConfigurationFromFile(t *testing.T) { config := `[broker] - address = "localhost:29092" + Addresses = "localhost:29092" topic = "platform.results.ccx" group = "aggregator" enabled = true enable_org_allowlist = true - [content] - path = "/rules-content" - [processing] org_allowlist_file = "org_allowlist.csv" [server] - address = ":8080" + Addresses = ":8080" api_prefix = "/api/v1/" api_spec_file = "openapi.json" debug = true - [storage] - db_driver = "sqlite3" - sqlite_datasource = ":memory:" + [ocp_recommendations_storage] + db_driver = "postgres" pg_username = "user" pg_password = "password" pg_host = "localhost" @@ -299,6 +381,10 @@ func TestLoadConfigurationFromFile(t *testing.T) { endpoint = "localhost:6379" password = "" timeout_seconds = 30 + + [sentry] + dsn = "test.example2.com" + environment = "test2" ` tmpFilename, err := GetTmpConfigFile(config) @@ -312,7 +398,7 @@ func TestLoadConfigurationFromFile(t *testing.T) { brokerCfg := conf.GetBrokerConfiguration() - assert.Equal(t, "localhost:29092", brokerCfg.Address) + assert.Equal(t, "localhost:29092", brokerCfg.Addresses) assert.Equal(t, "platform.results.ccx", brokerCfg.Topic) assert.Equal(t, "aggregator", brokerCfg.Group) assert.Equal(t, true, brokerCfg.Enabled) @@ -343,17 +429,16 @@ func TestLoadConfigurationFromFile(t *testing.T) { ) assert.Equal(t, storage.Configuration{ - Driver: "sqlite3", - SQLiteDataSource: ":memory:", - LogSQLQueries: true, - PGUsername: "user", - PGPassword: "password", - PGHost: "localhost", - PGPort: 5432, - PGDBName: "aggregator", - PGParams: "params", - Type: "sql", - }, conf.GetStorageConfiguration()) + Driver: "postgres", + LogSQLQueries: true, + PGUsername: "user", + PGPassword: "password", + PGHost: "localhost", + PGPort: 5432, + PGDBName: "aggregator", + PGParams: "params", + Type: "sql", + }, 
conf.GetOCPRecommendationsStorageConfiguration()) assert.Equal(t, storage.RedisConfiguration{ RedisEndpoint: "localhost:6379", @@ -361,6 +446,11 @@ func TestLoadConfigurationFromFile(t *testing.T) { RedisTimeoutSeconds: 30, RedisPassword: "", }, conf.GetRedisConfiguration()) + + assert.Equal(t, logger.SentryLoggingConfiguration{ + SentryDSN: "test.example2.com", + SentryEnvironment: "test2", + }, conf.GetSentryLoggingConfiguration()) } func TestLoadConfigurationFromEnv(t *testing.T) { @@ -370,7 +460,7 @@ func TestLoadConfigurationFromEnv(t *testing.T) { brokerCfg := conf.GetBrokerConfiguration() - assert.Equal(t, "localhost:9093", brokerCfg.Address) + assert.Equal(t, "localhost:9093", brokerCfg.Addresses) assert.Equal(t, "platform.results.ccx", brokerCfg.Topic) assert.Equal(t, "aggregator", brokerCfg.Group) assert.Equal(t, true, brokerCfg.Enabled) @@ -401,17 +491,16 @@ func TestLoadConfigurationFromEnv(t *testing.T) { ) assert.Equal(t, storage.Configuration{ - Driver: "sqlite3", - SQLiteDataSource: ":memory:", - LogSQLQueries: true, - PGUsername: "user", - PGPassword: "password", - PGHost: "localhost", - PGPort: 5432, - PGDBName: "aggregator", - PGParams: "params", - Type: "sql", - }, conf.GetStorageConfiguration()) + Driver: "postgres", + LogSQLQueries: true, + PGUsername: "user", + PGPassword: "password", + PGHost: "localhost", + PGPort: 5432, + PGDBName: "aggregator", + PGParams: "params", + Type: "sql", + }, conf.GetOCPRecommendationsStorageConfiguration()) assert.Equal(t, storage.RedisConfiguration{ RedisEndpoint: "default-redis-endpoint", @@ -419,6 +508,11 @@ func TestLoadConfigurationFromEnv(t *testing.T) { RedisTimeoutSeconds: 0, RedisPassword: "top secret", }, conf.GetRedisConfiguration()) + + assert.Equal(t, logger.SentryLoggingConfiguration{ + SentryDSN: "test.example.com", + SentryEnvironment: "test", + }, conf.GetSentryLoggingConfiguration()) } func TestGetLoggingConfigurationDefault(t *testing.T) { @@ -498,10 +592,10 @@ func 
TestLoadConfigurationFromEnvVariableClowderEnabled(t *testing.T) { // retrieve broker config brokerCfg := conf.GetBrokerConfiguration() - storageCfg := conf.GetStorageConfiguration() + storageCfg := conf.GetOCPRecommendationsStorageConfiguration() // check - assert.Equal(t, "localhost:29092", brokerCfg.Address, "Broker doesn't match") + assert.Equal(t, "localhost:29092", brokerCfg.Addresses, "Broker doesn't match") assert.Equal(t, "platform.results.ccx", brokerCfg.Topic, "Topic doesn't match") assert.Equal(t, testDB, storageCfg.PGDBName) } @@ -544,6 +638,57 @@ func TestClowderConfigForKafka(t *testing.T) { conf.Config.Broker.OrgAllowlistEnabled = false brokerCfg := conf.GetBrokerConfiguration() - assert.Equal(t, fmt.Sprintf("%s:%d", hostname, port), brokerCfg.Address) + assert.Equal(t, fmt.Sprintf("%s:%d", hostname, port), brokerCfg.Addresses) assert.Equal(t, newTopicName, conf.Config.Broker.Topic) } + +// TestClowderConfigForStorage tests loading the config file for testing from an +// environment variable. Clowder config is enabled in this case, checking the database +// configuration. 
+func TestClowderConfigForStorage(t *testing.T) { + os.Clearenv() + + var name = "db" + var hostname = "hostname" + var port = 8888 + var username = "username" + var password = "password" + + // explicit database and broker config + clowder.LoadedConfig = &clowder.AppConfig{ + Database: &clowder.DatabaseConfig{ + Name: name, + Hostname: hostname, + Port: port, + Username: username, + Password: password, + }, + } + + mustSetEnv(t, "INSIGHTS_RESULTS_AGGREGATOR_CONFIG_FILE", "../tests/config1") + mustSetEnv(t, "ACG_CONFIG", "tests/clowder_config.json") + + err := conf.LoadConfiguration("config") + assert.NoError(t, err, "Failed loading configuration file") + + ocpStorageConf := conf.GetOCPRecommendationsStorageConfiguration() + assert.Equal(t, name, ocpStorageConf.PGDBName) + assert.Equal(t, hostname, ocpStorageConf.PGHost) + assert.Equal(t, port, ocpStorageConf.PGPort) + assert.Equal(t, username, ocpStorageConf.PGUsername) + assert.Equal(t, password, ocpStorageConf.PGPassword) + // rest of config outside of clowder must be loaded correctly + assert.Equal(t, "postgres", ocpStorageConf.Driver) + assert.Equal(t, "sql", ocpStorageConf.Type) + + // same config loaded for DVO storage in envs using clowder (stage/prod) + dvoStorageConf := conf.GetDVORecommendationsStorageConfiguration() + assert.Equal(t, name, dvoStorageConf.PGDBName) + assert.Equal(t, hostname, dvoStorageConf.PGHost) + assert.Equal(t, port, dvoStorageConf.PGPort) + assert.Equal(t, username, dvoStorageConf.PGUsername) + assert.Equal(t, password, dvoStorageConf.PGPassword) + // rest of config outside of clowder must be loaded correctly + assert.Equal(t, "postgres", dvoStorageConf.Driver) + assert.Equal(t, "sql", dvoStorageConf.Type) +} diff --git a/config-devel.toml b/config-devel.toml index 887b787aa..df3ecfa43 100644 --- a/config-devel.toml +++ b/config-devel.toml @@ -1,5 +1,5 @@ [broker] -address = "kafka:29092" +addresses = "kafka:29092" security_protocol = "" sasl_mechanism = "PLAIN" sasl_username = 
"username" @@ -27,7 +27,7 @@ org_overview_limit_hours = 2 [processing] org_allowlist_file = "org_allowlist.csv" -[storage] +[ocp_recommendations_storage] db_driver = "postgres" pg_username = "postgres" pg_password = "postgres" @@ -38,6 +38,20 @@ pg_params = "sslmode=disable" log_sql_queries = true type = "sql" +[dvo_recommendations_storage] +db_driver = "postgres" +pg_username = "postgres" +pg_password = "postgres" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "sslmode=disable" +log_sql_queries = true +type = "sql" + +[storage_backend] +use = "ocp_recommendations" + [redis] database = 0 endpoint = "localhost:6379" @@ -51,6 +65,7 @@ path = "./tests/content/ok/" debug = true log_level = "" logging_to_cloud_watch_enabled = false +logging_to_sentry_enabled = false [cloudwatch] aws_access_id = "a key id" diff --git a/config.toml b/config.toml index e3ee8b94e..9651d292c 100644 --- a/config.toml +++ b/config.toml @@ -1,5 +1,5 @@ [broker] -address = "localhost:29092" +addresses = "localhost:29092" security_protocol = "" sasl_mechanism = "PLAIN" sasl_username = "username" @@ -27,12 +27,31 @@ org_overview_limit_hours = 2 [processing] org_allowlist_file = "org_allowlist.csv" -[storage] -db_driver = "sqlite3" -sqlite_datasource = "./aggregator.db" +[ocp_recommendations_storage] +db_driver = "postgres" +pg_username = "user" +pg_password = "password" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "" log_sql_queries = true type = "sql" +[dvo_recommendations_storage] +db_driver = "postgres" +pg_username = "user" +pg_password = "password" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "" +log_sql_queries = true +type = "sql" + +[storage_backend] +use = "" + [redis] database = 0 endpoint = "localhost:6379" @@ -46,6 +65,7 @@ path = "/rules-content" debug = false log_level = "" logging_to_cloud_watch_enabled = false +logging_to_sentry_enabled = false [cloudwatch] aws_access_id = "a key id" 
diff --git a/consumer.go b/consumer.go index bcf0eb715..87038ffaf 100644 --- a/consumer.go +++ b/consumer.go @@ -16,11 +16,14 @@ package main import ( "context" + "errors" "github.com/rs/zerolog/log" "github.com/RedHatInsights/insights-results-aggregator/broker" + "github.com/RedHatInsights/insights-results-aggregator/conf" "github.com/RedHatInsights/insights-results-aggregator/consumer" + "github.com/RedHatInsights/insights-results-aggregator/types" ) var ( @@ -33,17 +36,32 @@ var ( func startConsumer(brokerConf broker.Configuration) error { defer finishConsumerInstanceInitialization() - dbStorage, err := createStorage() + // right now just the OCP recommendation storage is handled by consumer + ocpRecommendationsStorage, dvoRecommendationStorage, err := createStorage() if err != nil { return err } - defer closeStorage(dbStorage) - - consumerInstance, err = consumer.New(brokerConf, dbStorage) - if err != nil { - log.Error().Err(err).Msg("Broker initialization error") - return err + defer closeStorage(ocpRecommendationsStorage) + defer closeStorage(dvoRecommendationStorage) + + // when DVO consumer will be made, it will need to use DVO storage + // (see line that calls createStorage()) + if conf.GetStorageBackendConfiguration().Use == types.OCPRecommendationsStorage { + consumerInstance, err = consumer.NewOCPRulesConsumer(brokerConf, ocpRecommendationsStorage) + if err != nil { + log.Error().Err(err).Msg("Broker initialization error") + return err + } + } else if conf.GetStorageBackendConfiguration().Use == types.DVORecommendationsStorage { + consumerInstance, err = consumer.NewDVORulesConsumer(brokerConf, dvoRecommendationStorage) + if err != nil { + log.Error().Err(err).Msg("Broker initialization error") + return err + } + } else { + log.Error().Msg("No backend storage or incompatible one selected. 
Exitting") + return errors.New("no backend storage or incompatible selected") } finishConsumerInstanceInitialization() diff --git a/consumer/benchmark_test.go b/consumer/benchmark_test.go index 32a7cf384..f2831ec01 100644 --- a/consumer/benchmark_test.go +++ b/consumer/benchmark_test.go @@ -22,7 +22,6 @@ import ( "github.com/RedHatInsights/insights-operator-utils/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator-data/testdata" - _ "github.com/mattn/go-sqlite3" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -31,9 +30,10 @@ import ( ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" ) -func benchmarkProcessingMessage(b *testing.B, s storage.Storage, messageProducer func() string) { +func benchmarkProcessingOCPMessage(b *testing.B, s storage.OCPRecommendationsStorage, messageProducer func() string) { kafkaConsumer := &consumer.KafkaConsumer{ - Storage: s, + Storage: s, + MessageProcessor: consumer.OCPRulesProcessor{}, } b.ResetTimer() @@ -42,26 +42,22 @@ func benchmarkProcessingMessage(b *testing.B, s storage.Storage, messageProducer } } -func getNoopStorage(testing.TB, bool) (storage.Storage, func()) { - return &storage.NoopStorage{}, func() {} +func getNoopStorage(testing.TB, bool) (storage.OCPRecommendationsStorage, func()) { + return &storage.NoopOCPStorage{}, func() {} } -func BenchmarkKafkaConsumer_ProcessMessage_SimpleMessages(b *testing.B) { +func BenchmarkOCPRulesConsumer_ProcessMessage_SimpleMessages(b *testing.B) { zerolog.SetGlobalLevel(zerolog.WarnLevel) var testCases = []struct { Name string - StorageProducer func(testing.TB, bool) (storage.Storage, func()) + StorageProducer func(testing.TB, bool) (storage.OCPRecommendationsStorage, func()) RandomMessages bool }{ {"NoopStorage", getNoopStorage, false}, {"NoopStorage", getNoopStorage, true}, - {"SQLiteInMemory", ira_helpers.MustGetSQLiteMemoryStorage, false}, - {"SQLiteInMemory", ira_helpers.MustGetSQLiteMemoryStorage, true}, {"Postgres", 
ira_helpers.MustGetPostgresStorage, false}, {"Postgres", ira_helpers.MustGetPostgresStorage, true}, - {"SQLiteFile", ira_helpers.MustGetSQLiteFileStorage, false}, - {"SQLiteFile", ira_helpers.MustGetSQLiteFileStorage, true}, } for _, testCase := range testCases { @@ -77,9 +73,9 @@ func BenchmarkKafkaConsumer_ProcessMessage_SimpleMessages(b *testing.B) { defer ira_helpers.MustCloseStorage(b, benchStorage) if testCase.RandomMessages { - benchmarkProcessingMessage(b, benchStorage, testdata.GetRandomConsumerMessage) + benchmarkProcessingOCPMessage(b, benchStorage, testdata.GetRandomConsumerMessage) } else { - benchmarkProcessingMessage(b, benchStorage, func() string { + benchmarkProcessingOCPMessage(b, benchStorage, func() string { return testdata.ConsumerMessage }) } @@ -87,11 +83,14 @@ func BenchmarkKafkaConsumer_ProcessMessage_SimpleMessages(b *testing.B) { } } -func getMessagesFromDir(b *testing.B, dataDir string) []string { +func getOCPMessagesFromDir(b *testing.B, dataDir string) []string { files, err := os.ReadDir(dataDir) helpers.FailOnError(b, err) var messages []string + c := consumer.KafkaConsumer{ + MessageProcessor: consumer.OCPRulesProcessor{}, + } for _, file := range files { if file.Type().IsRegular() { @@ -102,7 +101,7 @@ func getMessagesFromDir(b *testing.B, dataDir string) []string { helpers.FailOnError(b, err) zerolog.SetGlobalLevel(zerolog.Disabled) - parsedMessage, err := consumer.DeserializeMessage(fileBytes) + parsedMessage, err := consumer.DeserializeMessage(&c, fileBytes) zerolog.SetGlobalLevel(zerolog.WarnLevel) if err != nil { log.Warn().Msgf("skipping file %+v because it has bad structure", file.Name()) @@ -124,19 +123,17 @@ func getMessagesFromDir(b *testing.B, dataDir string) []string { return messages } -func BenchmarkKafkaConsumer_ProcessMessage_RealMessages(b *testing.B) { +func BenchmarkOCPRulesConsumer_ProcessMessage_RealMessages(b *testing.B) { zerolog.SetGlobalLevel(zerolog.WarnLevel) - messages := getMessagesFromDir(b, 
"../utils/produce_insights_results/") + messages := getOCPMessagesFromDir(b, "../utils/produce_insights_results/") var testCases = []struct { Name string - StorageProducer func(testing.TB, bool) (storage.Storage, func()) + StorageProducer func(testing.TB, bool) (storage.OCPRecommendationsStorage, func()) }{ {"NoopStorage", getNoopStorage}, - {"SQLiteInMemory", ira_helpers.MustGetSQLiteMemoryStorage}, {"Postgres", ira_helpers.MustGetPostgresStorage}, - {"SQLiteFile", ira_helpers.MustGetSQLiteFileStorage}, } for _, testCase := range testCases { @@ -150,7 +147,8 @@ func BenchmarkKafkaConsumer_ProcessMessage_RealMessages(b *testing.B) { defer ira_helpers.MustCloseStorage(b, benchStorage) kafkaConsumer := &consumer.KafkaConsumer{ - Storage: benchStorage, + Storage: benchStorage, + MessageProcessor: consumer.OCPRulesProcessor{}, } b.ResetTimer() diff --git a/consumer/consumer.go b/consumer/consumer.go index 55f48bb2e..0b49ebf38 100644 --- a/consumer/consumer.go +++ b/consumer/consumer.go @@ -14,34 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package consumer contains interface for any consumer that is able to -// process messages. It also contains implementation of Kafka consumer. -// -// It is expected that consumed messages are generated by ccx-data-pipeline -// based on OCP rules framework. The report generated by the framework are -// enhanced with more context information taken from different sources, like -// the organization ID, account number, unique cluster name, and the -// LastChecked timestamp (taken from the incoming Kafka record containing the -// URL to the archive). -// -// It is also expected that consumed messages contains one INFO rule hit that -// contains cluster version. 
That rule hit is produced by special rule used -// only in external data pipeline: -// "version_info|CLUSTER_VERSION_INFO" +// Package consumer contains interface for any consumer that is able to process +// messages. It also contains implementation of various Kafka consumers. // // For more information please see: // https://redhatinsights.github.io/insights-data-schemas/external-pipeline/ccx_data_pipeline.html package consumer import ( - "context" - "github.com/Shopify/sarama" - "github.com/rs/zerolog/log" - - "github.com/RedHatInsights/insights-results-aggregator/broker" - "github.com/RedHatInsights/insights-results-aggregator/producer" - "github.com/RedHatInsights/insights-results-aggregator/storage" ) // Consumer represents any consumer of insights-rules messages @@ -50,216 +31,3 @@ type Consumer interface { Close() error HandleMessage(msg *sarama.ConsumerMessage) error } - -// KafkaConsumer is an implementation of Consumer interface -// Example: -// -// kafkaConsumer, err := consumer.New(brokerCfg, storage) -// -// if err != nil { -// panic(err) -// } -// -// kafkaConsumer.Serve() -// -// err := kafkaConsumer.Stop() -// -// if err != nil { -// panic(err) -// } -type KafkaConsumer struct { - Configuration broker.Configuration - ConsumerGroup sarama.ConsumerGroup - Storage storage.Storage - numberOfSuccessfullyConsumedMessages uint64 - numberOfErrorsConsumingMessages uint64 - ready chan bool - cancel context.CancelFunc - payloadTrackerProducer *producer.PayloadTrackerProducer - deadLetterProducer *producer.DeadLetterProducer -} - -// DefaultSaramaConfig is a config which will be used by default -// here you can use specific version of a protocol for example -// useful for testing -var DefaultSaramaConfig *sarama.Config - -// New constructs new implementation of Consumer interface -func New(brokerCfg broker.Configuration, storage storage.Storage) (*KafkaConsumer, error) { - return NewWithSaramaConfig(brokerCfg, storage, DefaultSaramaConfig) -} - -// 
NewWithSaramaConfig constructs new implementation of Consumer interface with custom sarama config -func NewWithSaramaConfig( - brokerCfg broker.Configuration, - storage storage.Storage, - saramaConfig *sarama.Config, -) (*KafkaConsumer, error) { - var err error - - if saramaConfig == nil { - saramaConfig, err = broker.SaramaConfigFromBrokerConfig(brokerCfg) - if err != nil { - log.Error().Err(err).Msg("unable to create sarama configuration from current broker configuration") - return nil, err - } - } - - log.Info(). - Str("addr", brokerCfg.Address). - Str("group", brokerCfg.Group). - Msg("New consumer group") - - consumerGroup, err := sarama.NewConsumerGroup([]string{brokerCfg.Address}, brokerCfg.Group, saramaConfig) - if err != nil { - log.Error().Err(err).Msg("Unable to create consumer group") - return nil, err - } - log.Info().Msg("Consumer group has been created") - - log.Info().Msg("Constructing payload tracker producer") - payloadTrackerProducer, err := producer.NewPayloadTrackerProducer(brokerCfg) - if err != nil { - log.Error().Err(err).Msg("Unable to construct payload tracker producer") - return nil, err - } - if payloadTrackerProducer == nil { - log.Info().Msg("Payload tracker producer not configured") - } else { - log.Info().Msg("Payload tracker producer has been configured") - } - - log.Info().Msg("Constructing DLQ producer") - deadLetterProducer, err := producer.NewDeadLetterProducer(brokerCfg) - if err != nil { - log.Error().Err(err).Msg("Unable to construct dead letter producer") - return nil, err - } - if deadLetterProducer == nil { - log.Info().Msg("Dead letter producer not configured") - } else { - log.Info().Msg("Dead letter producer has been configured") - } - - consumer := &KafkaConsumer{ - Configuration: brokerCfg, - ConsumerGroup: consumerGroup, - Storage: storage, - numberOfSuccessfullyConsumedMessages: 0, - numberOfErrorsConsumingMessages: 0, - ready: make(chan bool), - payloadTrackerProducer: payloadTrackerProducer, - deadLetterProducer: 
deadLetterProducer, - } - - return consumer, nil -} - -// Serve starts listening for messages and processing them. It blocks current thread. -func (consumer *KafkaConsumer) Serve() { - ctx, cancel := context.WithCancel(context.Background()) - consumer.cancel = cancel - - go func() { - for { - // `Consume` should be called inside an infinite loop, when a - // server-side rebalance happens, the consumer session will need to be - // recreated to get the new claims - if err := consumer.ConsumerGroup.Consume(ctx, []string{consumer.Configuration.Topic}, consumer); err != nil { - log.Fatal().Err(err).Msg("unable to recreate kafka session") - } - - // check if context was cancelled, signaling that the consumer should stop - if ctx.Err() != nil { - return - } - - log.Info().Msg("created new kafka session") - - consumer.ready = make(chan bool) - } - }() - - // Await till the consumer has been set up - log.Info().Msg("waiting for consumer to become ready") - <-consumer.ready - log.Info().Msg("finished waiting for consumer to become ready") - - // Actual processing is done in goroutine created by sarama (see ConsumeClaim below) - log.Info().Msg("started serving consumer") - <-ctx.Done() - log.Info().Msg("context cancelled, exiting") - - cancel() -} - -// Setup is run at the beginning of a new session, before ConsumeClaim -func (consumer *KafkaConsumer) Setup(sarama.ConsumerGroupSession) error { - log.Info().Msg("new session has been setup") - // Mark the consumer as ready - close(consumer.ready) - return nil -} - -// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited -func (consumer *KafkaConsumer) Cleanup(sarama.ConsumerGroupSession) error { - log.Info().Msg("new session has been finished") - return nil -} - -// ConsumeClaim starts a consumer loop of ConsumerGroupClaim's Messages(). -func (consumer *KafkaConsumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - log.Info(). 
- Int64(offsetKey, claim.InitialOffset()). - Msg("starting messages loop") - - for message := range claim.Messages() { - err := consumer.HandleMessage(message) - if err != nil { - // already handled in HandleMessage, just log - log.Error().Err(err).Msg("Problem while handling the message") - } - session.MarkMessage(message, "") - } - - return nil -} - -// Close method closes all resources used by consumer -func (consumer *KafkaConsumer) Close() error { - if consumer.cancel != nil { - consumer.cancel() - } - - if consumer.ConsumerGroup != nil { - if err := consumer.ConsumerGroup.Close(); err != nil { - log.Error().Err(err).Msg("unable to close consumer group") - } - } - - if consumer.payloadTrackerProducer != nil { - if err := consumer.payloadTrackerProducer.Close(); err != nil { - log.Error().Err(err).Msg("unable to close payload tracker Kafka producer") - } - } - - if consumer.deadLetterProducer != nil { - if err := consumer.deadLetterProducer.Close(); err != nil { - log.Error().Err(err).Msg("unable to close dead letter Kafka producer") - } - } - - return nil -} - -// GetNumberOfSuccessfullyConsumedMessages returns number of consumed messages -// since creating KafkaConsumer obj -func (consumer *KafkaConsumer) GetNumberOfSuccessfullyConsumedMessages() uint64 { - return consumer.numberOfSuccessfullyConsumedMessages -} - -// GetNumberOfErrorsConsumingMessages returns number of errors during consuming messages -// since creating KafkaConsumer obj -func (consumer *KafkaConsumer) GetNumberOfErrorsConsumingMessages() uint64 { - return consumer.numberOfErrorsConsumingMessages -} diff --git a/consumer/consumer_test.go b/consumer/consumer_test.go index e8aea1e6e..1439801ba 100644 --- a/consumer/consumer_test.go +++ b/consumer/consumer_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package consumer_test import ( - "bytes" "encoding/json" "errors" "fmt" @@ -26,13 +25,11 @@ import ( "testing" "time" - "github.com/RedHatInsights/insights-operator-utils/tests/saramahelpers" "github.com/RedHatInsights/insights-results-aggregator/producer" - ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" - zerolog_log "github.com/rs/zerolog/log" "github.com/RedHatInsights/insights-operator-utils/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator-data/testdata" + ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" "github.com/Shopify/sarama" mapset "github.com/deckarep/golang-set" "github.com/rs/zerolog" @@ -40,7 +37,6 @@ import ( "github.com/RedHatInsights/insights-results-aggregator/broker" "github.com/RedHatInsights/insights-results-aggregator/consumer" - "github.com/RedHatInsights/insights-results-aggregator/storage" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -53,15 +49,16 @@ const ( // message to be checked organizationIDNotInAllowList = "organization ID is not in allow list" - testReport = `{"fingerprints": [], "info": [], "skips": [], "system": {}, "analysis_metadata":{"metadata":"some metadata"},"reports":[{"rule_id":"rule_4|RULE_4","component":"ccx_rules_ocp.external.rules.rule_1.report","type":"rule","key":"RULE_4","details":"some details"},{"rule_id":"rule_4|RULE_4","component":"ccx_rules_ocp.external.rules.rule_2.report","type":"rule","key":"RULE_2","details":"some details"},{"rule_id":"rule_5|RULE_5","component":"ccx_rules_ocp.external.rules.rule_5.report","type":"rule","key":"RULE_3","details":"some details"}]}` + testReport = `{"fingerprints": [], "info": [], "skips": [], "system": {}, "analysis_metadata":{"metadata":"some metadata"},"reports":[{"rule_id":"rule_4|RULE_4","component":"ccx_rules_ocp.external.rules.rule_1.report","type":"rule","key":"RULE_4","details":"some 
details"},{"rule_id":"rule_4|RULE_4","component":"ccx_rules_ocp.external.rules.rule_2.report","type":"rule","key":"RULE_2","details":"some details"},{"rule_id":"rule_5|RULE_5","component":"ccx_rules_ocp.external.rules.rule_5.report","type":"rule","key":"RULE_3","details":"some details"}]}` + testMetrics = `{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"an_issue|DVO_AN_ISSUE","component":"ccx_rules_ocp.external.dvo.an_issue_pod.recommendation","key":"DVO_AN_ISSUE","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"193a2099-1234-5678-916a-d570c9aac158"}]}]}` ) var ( testOrgAllowlist = mapset.NewSetWith(types.OrgID(1)) wrongBrokerCfg = broker.Configuration{ - Address: "localhost:1234", - Topic: "topic", - Group: "group", + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", } messageReportWithRuleHits = `{ "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, @@ -69,6 +66,12 @@ var ( "Report":` + testReport + `, "LastChecked": "` + testdata.LastCheckedAt.UTC().Format(time.RFC3339) + `" }` + messageReportWithDVOHits = `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics":` + testMetrics + `, + "LastChecked": "` + testdata.LastCheckedAt.UTC().Format(time.RFC3339) + `" + }` messageNoReportsNoInfo = `{ "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, @@ -83,6 +86,13 @@ var ( "skips": [] } }` + + messageReportNoDVOMetrics = `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": {}, + "LastChecked": "` + 
testdata.LastCheckedAt.UTC().Format(time.RFC3339) + `" + }` ) func init() { @@ -99,24 +109,6 @@ func mustConsumerProcessMessage(t testing.TB, mockConsumer consumer.Consumer, me helpers.FailOnError(t, consumerProcessMessage(mockConsumer, message)) } -func dummyConsumer(s storage.Storage, allowlist bool) consumer.Consumer { - brokerCfg := broker.Configuration{ - Address: "localhost:1234", - Topic: "topic", - Group: "group", - } - if allowlist { - brokerCfg.OrgAllowlist = mapset.NewSetWith(types.OrgID(1)) - brokerCfg.OrgAllowlistEnabled = true - } else { - brokerCfg.OrgAllowlistEnabled = false - } - return &consumer.KafkaConsumer{ - Configuration: brokerCfg, - Storage: s, - } -} - func createConsumerMessage(report string) string { consumerMessage := `{ "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, @@ -128,131 +120,6 @@ func createConsumerMessage(report string) string { return consumerMessage } -func TestConsumerConstructorNoKafka(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) - defer closer() - - mockConsumer, err := consumer.New(wrongBrokerCfg, mockStorage) - assert.Error(t, err) - assert.Contains( - t, err.Error(), "kafka: client has run out of available brokers to talk to", - ) - assert.Equal( - t, - (*consumer.KafkaConsumer)(nil), - mockConsumer, - "consumer.New should return nil instead of Consumer implementation", - ) -} - -func TestDeserializeEmptyMessage(t *testing.T) { - _, err := consumer.DeserializeMessage([]byte("")) - assert.EqualError(t, err, "unexpected end of JSON input") -} - -func TestDeserializeMessageWithWrongContent(t *testing.T) { - const message = `{"this":"is", "not":"expected content"}` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.Error(t, err) - assert.Contains(t, err.Error(), "missing required attribute") -} - -func TestDeserializeMessageWithImproperJSON(t *testing.T) { - const message = `"this_is_not_json_dude"` - _, err := consumer.DeserializeMessage([]byte(message)) - 
assert.EqualError( - t, - err, - "json: cannot unmarshal string into Go value of type consumer.incomingMessage", - ) -} - -func TestDeserializeMessageWithImproperReport(t *testing.T) { - consumerMessage := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", - "Report": { - "system": { - "metadata": {}, - "hostname": null - }, - "reports": "blablablabla", - "fingerprints": [], - "skips": [], - "info": [] - } - }` - message, err := consumer.DeserializeMessage([]byte(consumerMessage)) - helpers.FailOnError(t, err) - assert.Equal(t, types.OrgID(1), *message.Organization) - assert.Equal(t, testdata.ClusterName, *message.ClusterName) -} - -func TestDeserializeProperMessage(t *testing.T) { - message, err := consumer.DeserializeMessage([]byte(testdata.ConsumerMessage)) - helpers.FailOnError(t, err) - assert.Equal(t, types.OrgID(1), *message.Organization) - assert.Equal(t, testdata.ClusterName, *message.ClusterName) -} - -func TestDeserializeMessageWrongClusterName(t *testing.T) { - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "this is not a UUID", - "Report": ` + testdata.ConsumerReport + ` - }` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.EqualError(t, err, "cluster name is not a UUID") -} - -func TestDeserializeMessageWithoutOrgID(t *testing.T) { - message := `{ - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report": ` + testdata.ConsumerReport + ` - }` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.EqualError(t, err, "missing required attribute 'OrgID'") -} - -func TestDeserializeMessageWithoutClusterName(t *testing.T) { - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "Report": ` + testdata.ConsumerReport + ` - }` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.EqualError(t, err, "missing required attribute 'ClusterName'") -} 
- -func TestDeserializeMessageWithoutReport(t *testing.T) { - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `" - }` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.EqualError(t, err, "missing required attribute 'Report'") -} - -func TestDeserializeMessageWithEmptyReport(t *testing.T) { - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report": {} - }` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.Nil(t, err, "deserializeMessage should not return error for empty report") -} - -func TestDeserializeMessageNullReport(t *testing.T) { - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report": null - }` - _, err := consumer.DeserializeMessage([]byte(message)) - assert.EqualError(t, err, "missing required attribute 'Report'") -} - func unmarshall(s string) *json.RawMessage { var res json.RawMessage err := json.Unmarshal([]byte(s), &res) @@ -262,525 +129,28 @@ func unmarshall(s string) *json.RawMessage { return &res } -func TestIsReportWithEmptyAttributesAllEmpty(t *testing.T) { - r := consumer.Report{ - "system": unmarshall(`{"metadata": {}, "hostname": null}`), - "reports": unmarshall("[]"), - "fingerprints": unmarshall("[]"), - "skips": unmarshall("[]"), - "info": unmarshall("[]"), - } - isEmpty := consumer.IsReportWithEmptyAttributes(r) - assert.True(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = true for this report") -} - -func TestIsReportWithEmptyAttributesEmptyReport(t *testing.T) { - r := consumer.Report{} - isEmpty := consumer.IsReportWithEmptyAttributes(r) - assert.True(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = true for this report") -} - -func TestIsReportWithEmptyAttributesSystemDataIsPresent(t *testing.T) { - r := consumer.Report{ - "system": unmarshall(`{"metadata": {}, 
"hostname": "a_hostname_that_can_be_unmarshalled"}`), - "reports": unmarshall("[]"), - "fingerprints": unmarshall("[]"), - "skips": unmarshall("[]"), - "info": unmarshall("[]"), - } - isEmpty := consumer.IsReportWithEmptyAttributes(r) - assert.False(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = false for this report") -} - -// Additional existence check have been added to avoid the possibility of -// panicking due to a nil pointer exception -func TestIsReportWithEmptyAttributesLessAttributes(t *testing.T) { - r := consumer.Report{ - "system": unmarshall(`{"metadata": {}, "hostname": "a_hostname_that_can_be_unmarshalled"}`), - "reports": unmarshall("[]"), - "fingerprints": unmarshall("[]"), - } - isEmpty := consumer.IsReportWithEmptyAttributes(r) - assert.False(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = false for this report") -} - -func TestIsReportWithEmptyAttributesInfoIsNotPresent(t *testing.T) { - r := consumer.Report{ - "system": unmarshall(`{"metadata": {}, "hostname": null}`), - "reports": unmarshall("[]"), - "fingerprints": unmarshall("[]"), - "skips": unmarshall("[]"), - } - isEmpty := consumer.IsReportWithEmptyAttributes(r) - assert.True(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = true for this report") -} - -func TestIsReportWithEmptyAttributesReportsIsPresent(t *testing.T) { - reportItems := `[ - { - "component": "test.rule", - "key": "test.key", - "details": ` + helpers.ToJSONString(testdata.Rule1ExtraData) + ` - } - ]` - r := consumer.Report{ - "system": unmarshall(`{"metadata": {}, "hostname": null}`), - "reports": unmarshall(reportItems), - "fingerprints": unmarshall("[]"), - "skips": unmarshall("[]"), - } - isEmpty := consumer.IsReportWithEmptyAttributes(r) - assert.False(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = false for this report") -} - -func TestCheckReportStructureEmptyReport(t *testing.T) { - report := consumer.Report{} - - err := 
consumer.CheckReportStructure(report) - assert.EqualError(t, err, "empty report found in deserialized message") -} - -func TestCheckReportStructureReportWithAllAttributesPresentAndEmpty(t *testing.T) { - report := consumer.Report{ - "fingerprints": unmarshall("[]"), - "info": unmarshall("[]"), - "reports": unmarshall("[]"), - "skips": unmarshall("[]"), - "system": unmarshall("{}"), - } - err := consumer.CheckReportStructure(report) - helpers.FailOnError(t, err, "empty report with all expected attributes present should be processed") -} - -func TestCheckReportStructureReportWithAnalysisMetadata(t *testing.T) { - report := consumer.Report{ - "system": unmarshall(`{"metadata": {}, "hostname": null}`), - "reports": unmarshall("[]"), - "fingerprints": unmarshall("[]"), - "analysis_metadata": unmarshall(`{"start": "2023-09-11T18:33:14.527845+00:00", "finish": "2023-09-11T18:33:15.632777+00:00"}`), - } - err := consumer.CheckReportStructure(report) - assert.EqualError(t, err, "empty report found in deserialized message") -} - -// If some attributes are missing, but all the present attributes are empty, we just -// skip the processing of the message. 
-func TestCheckReportStructureReportWithEmptyAndMissingAttributes(t *testing.T) { - report := consumer.Report{ - "fingerprints": unmarshall("[]"), - "reports": unmarshall("[]"), - "skips": unmarshall("[]"), - } - err := consumer.CheckReportStructure(report) - assert.EqualError(t, err, "empty report found in deserialized message") -} - -func TestCheckReportStructureReportWithItems(t *testing.T) { - report := consumer.Report{ - "fingerprints": unmarshall("[]"), - "info": unmarshall("[]"), - "reports": unmarshall(string(testdata.Report2Rules)), - "skips": unmarshall("[]"), - "system": unmarshall(`{"metadata": {},"hostname": null}`), - } - err := consumer.CheckReportStructure(report) - assert.Nil(t, err, "checkReportStructure should return err = nil for empty reports") -} - -func TestParseReportContentEmptyReportsAttribute(t *testing.T) { - deserialized, err := consumer.DeserializeMessage([]byte(testdata.ConsumerMessage)) - assert.Nil(t, err, "deserializeMessage should not return error for this message") - - err = consumer.ParseReportContent(&deserialized) - assert.Nil(t, err, "ParseReportContent should not return nil for empty report") -} - -func TestParseReportContentValidReport(t *testing.T) { - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + string(testdata.Report2Rules) + ` - }` - - deserialized, err := consumer.DeserializeMessage([]byte(message)) - assert.Nil(t, err, "deserializeMessage should not return error for this message") - - err = consumer.ParseReportContent(&deserialized) - assert.Nil(t, err, "ParseReportContent should return nil for reports with proper structure") -} - -func TestParseEmptyMessage(t *testing.T) { - c := consumer.KafkaConsumer{} - message := sarama.ConsumerMessage{} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "unexpected end of JSON input") -} - -func TestParseMessageWithWrongContent(t *testing.T) { - c := 
consumer.KafkaConsumer{} - message := sarama.ConsumerMessage{Value: []byte(`{"this":"is", "not":"expected content"}`)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "missing required attribute 'OrgID'") -} - -func TestParseProperMessageWrongClusterName(t *testing.T) { - c := consumer.KafkaConsumer{} - data := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "this is not a UUID", - "Report": ` + testdata.ConsumerReport + ` - }` - message := sarama.ConsumerMessage{Value: []byte(data)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "cluster name is not a UUID") -} - -func TestParseMessageWithoutOrgID(t *testing.T) { - c := consumer.KafkaConsumer{} - data := `{ - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report": ` + testdata.ConsumerReport + ` - }` - message := sarama.ConsumerMessage{Value: []byte(data)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "missing required attribute 'OrgID'") -} - -func TestParseMessageWithoutClusterName(t *testing.T) { - c := consumer.KafkaConsumer{} - data := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "Report": ` + testdata.ConsumerReport + ` - }` - message := sarama.ConsumerMessage{Value: []byte(data)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "missing required attribute 'ClusterName'") -} - -func TestParseMessageWithoutReport(t *testing.T) { - c := consumer.KafkaConsumer{} - data := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `" - }` - message := sarama.ConsumerMessage{Value: []byte(data)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "missing required attribute 'Report'") -} - -func TestParseMessageEmptyReport(t *testing.T) { - c := consumer.KafkaConsumer{} - data := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report": 
{} - }` - message := sarama.ConsumerMessage{Value: []byte(data)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "empty report found in deserialized message") -} -func TestParseMessageNullReport(t *testing.T) { - c := consumer.KafkaConsumer{} - data := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report": null - }` - message := sarama.ConsumerMessage{Value: []byte(data)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "missing required attribute 'Report'") -} - -func TestParseMessageWithImproperJSON(t *testing.T) { - c := consumer.KafkaConsumer{} - message := sarama.ConsumerMessage{Value: []byte(`"this_is_not_json_dude"`)} - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "json: cannot unmarshal string into Go value of type consumer.incomingMessage") -} - -func TestParseMessageWithImproperReport(t *testing.T) { - data := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", - "Report": { - "system": { - "metadata": {}, - "hostname": null - }, - "reports": "blablablabla", - "fingerprints": [], - "skips": [], - "info": [] - } - }` - c := consumer.KafkaConsumer{} - message := sarama.ConsumerMessage{Value: []byte(data)} - - _, err := consumer.ParseMessage(&c, &message) - assert.EqualError(t, err, "json: cannot unmarshal string into Go value of type []types.ReportItem") -} - -func TestParseProperMessageReportWithEmptyAttributes(t *testing.T) { - c := consumer.KafkaConsumer{} - message := sarama.ConsumerMessage{Value: []byte(testdata.ConsumerMessage)} - parsed, err := consumer.ParseMessage(&c, &message) - helpers.FailOnError(t, err, "empty report with all expected attributes present should be processed") - - assert.Equal(t, types.OrgID(1), *parsed.Organization) - assert.Equal(t, testdata.ClusterName, 
*parsed.ClusterName) - - var expectedReport consumer.Report - err = json.Unmarshal([]byte(testdata.ConsumerReport), &expectedReport) - helpers.FailOnError(t, err) - assert.Equal(t, expectedReport, *parsed.Report) - assert.EqualValues(t, []types.ReportItem{}, parsed.ParsedHits) -} - -func TestParseProperMessageWithInfoReport(t *testing.T) { - // this report contains just one rule hit: from special INFO rule used - // just in external data pipeline - consumerReport := `{ - "fingerprints": [], - "reports": [], - "skips": [], - "system": {}, - "info": [ - { - "info_id": "version_info|CLUSTER_VERSION_INFO", - "component": "ccx_rules_processing.version_info.report", - "type": "info", - "key": "CLUSTER_VERSION_INFO", - "details": { - "version": "4.9", - "type": "info", - "info_key": "CLUSTER_VERSION_INFO" - }, - "tags": [], - "links": {} - } - ] - - }` - c := consumer.KafkaConsumer{} - message := sarama.ConsumerMessage{Value: []byte(createConsumerMessage(consumerReport))} - parsed, err := consumer.ParseMessage(&c, &message) - helpers.FailOnError(t, err, "this message is valid and should be processed") - assert.Equal(t, types.OrgID(1), *parsed.Organization) - assert.Equal(t, testdata.ClusterName, *parsed.ClusterName) - - var expectedReport consumer.Report - err = json.Unmarshal([]byte(consumerReport), &expectedReport) - helpers.FailOnError(t, err) - assert.Equal(t, expectedReport, *parsed.Report) - assert.EqualValues(t, []types.ReportItem{}, parsed.ParsedHits) - - expectedInfoReport := []types.InfoItem{ - { - InfoID: "version_info|CLUSTER_VERSION_INFO", - InfoKey: "CLUSTER_VERSION_INFO", - Details: map[string]string{ - "version": "4.9", - "type": "info", - "info_key": "CLUSTER_VERSION_INFO", - }, - }, - } - assert.EqualValues(t, expectedInfoReport, parsed.ParsedInfo) -} - -func TestProcessEmptyMessage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) +func TestConsumerConstructorNoKafka(t *testing.T) { + mockStorage, closer := 
ira_helpers.MustGetPostgresStorage(t, false) defer closer() - c := dummyConsumer(mockStorage, true) - - message := sarama.ConsumerMessage{} - // message is empty -> nothing should be written into storage - err := c.HandleMessage(&message) - assert.EqualError(t, err, "unexpected end of JSON input") - - count, err := mockStorage.ReportsCount() - helpers.FailOnError(t, err) - - // no record should be written into database + mockConsumer, err := consumer.NewKafkaConsumer(wrongBrokerCfg, mockStorage, nil) + assert.Error(t, err) + assert.Contains( + t, err.Error(), "kafka: client has run out of available brokers to talk to", + ) assert.Equal( t, - 0, - count, - "process message shouldn't write anything into the DB", + (*consumer.KafkaConsumer)(nil), + mockConsumer, + "consumer.New should return nil instead of Consumer implementation", ) } -func TestProcessCorrectMessage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - - defer closer() - - c := dummyConsumer(mockStorage, true) - - message := sarama.ConsumerMessage{} - message.Value = []byte(messageReportWithRuleHits) - // message is correct -> one record should be written into storage - err := c.HandleMessage(&message) - helpers.FailOnError(t, err) - - count, err := mockStorage.ReportsCount() - helpers.FailOnError(t, err) - - // exactly one record should be written into database - assert.Equal(t, 1, count, "process message should write one record into DB") -} - -func TestProcessingEmptyReportMissingAttributesWithClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - - mockConsumer := dummyConsumer(mockStorage, true) - closer() - - err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) - helpers.FailOnError(t, err, "empty report should not be considered an error at HandleMessage level") -} - -func TestProcessingValidMessageEmptyReportWithRequiredAttributesWithClosedStorage(t *testing.T) { - mockStorage, closer := 
ira_helpers.MustGetMockStorage(t, true) - - mockConsumer := dummyConsumer(mockStorage, true) - closer() - - err := consumerProcessMessage(mockConsumer, testdata.ConsumerMessage) - assert.EqualError(t, err, "sql: database is closed") -} - -func TestProcessingCorrectMessageWithClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - - mockConsumer := dummyConsumer(mockStorage, true) - closer() - - err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) - assert.EqualError(t, err, "sql: database is closed") -} - -func TestProcessingMessageWithWrongDateFormatAndEmptyReport(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := dummyConsumer(mockStorage, true) - - err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) - assert.Nil(t, err, "Message with empty report should not be processed") -} - -func TestProcessingMessageWithWrongDateFormatReportNotEmpty(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := dummyConsumer(mockStorage, true) - - messageValue := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - "LastChecked": "2020.01.23 16:15:59" - }` - err := consumerProcessMessage(mockConsumer, messageValue) - if _, ok := err.(*time.ParseError); err == nil || !ok { - t.Fatal(fmt.Errorf( - "expected time.ParseError error because date format is wrong. 
Got %+v", err, - )) - } -} - -func TestKafkaConsumerMockOK(t *testing.T) { - helpers.RunTestWithTimeout(t, func(t testing.TB) { - mockConsumer, closer := ira_helpers.MustGetMockKafkaConsumerWithExpectedMessages( - t, - testTopicName, - testOrgAllowlist, - []string{messageReportWithRuleHits}, - ) - - go mockConsumer.Serve() - - // wait for message processing - ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) - - closer() - - assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) - assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) - }, testCaseTimeLimit) -} - -func TestKafkaConsumerMockBadMessage(t *testing.T) { - helpers.RunTestWithTimeout(t, func(t testing.TB) { - mockConsumer, closer := ira_helpers.MustGetMockKafkaConsumerWithExpectedMessages( - t, - testTopicName, - testOrgAllowlist, - []string{"bad message"}, - ) - - go mockConsumer.Serve() - - // wait for message processing - ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) - - closer() - - assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) - assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) - }, testCaseTimeLimit) -} - -func TestKafkaConsumerMockWritingMsgWithEmptyReportToClosedStorage(t *testing.T) { - helpers.RunTestWithTimeout(t, func(t testing.TB) { - mockConsumer, closer := ira_helpers.MustGetMockKafkaConsumerWithExpectedMessages( - t, testTopicName, testOrgAllowlist, []string{messageNoReportsNoInfo}, - ) - - err := mockConsumer.KafkaConsumer.Storage.Close() - helpers.FailOnError(t, err) - - go mockConsumer.Serve() - - ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) - - closer() - - // Since the report is present but empty, we stop processing this message without errors - assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) 
- assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) - }, testCaseTimeLimit) -} - -func TestKafkaConsumerMockWritingMsgWithReportToClosedStorage(t *testing.T) { - helpers.RunTestWithTimeout(t, func(t testing.TB) { - mockConsumer, closer := ira_helpers.MustGetMockKafkaConsumerWithExpectedMessages( - t, testTopicName, testOrgAllowlist, []string{messageReportWithRuleHits}, - ) - - err := mockConsumer.KafkaConsumer.Storage.Close() - helpers.FailOnError(t, err) - - go mockConsumer.Serve() - - ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) - - closer() - - // Since the report is present and not empty, it is processed, and we reach the closed DB error - assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) - assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) - }, testCaseTimeLimit) -} - func TestKafkaConsumer_New(t *testing.T) { helpers.RunTestWithTimeout(t, func(t testing.TB) { sarama.Logger = log.New(os.Stdout, saramaLogPrefix, log.LstdFlags) - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mockBroker := sarama.NewMockBroker(t, 0) @@ -788,11 +158,11 @@ func TestKafkaConsumer_New(t *testing.T) { mockBroker.SetHandlerByMap(ira_helpers.GetHandlersMapForMockConsumer(t, mockBroker, testTopicName)) - mockConsumer, err := consumer.New(broker.Configuration{ - Address: mockBroker.Addr(), - Topic: testTopicName, - Enabled: true, - }, mockStorage) + mockConsumer, err := consumer.NewKafkaConsumer(broker.Configuration{ + Addresses: mockBroker.Addr(), + Topic: testTopicName, + Enabled: true, + }, mockStorage, nil) helpers.FailOnError(t, err) err = mockConsumer.Close() @@ -800,292 +170,8 @@ func TestKafkaConsumer_New(t *testing.T) { }, testCaseTimeLimit) } -func TestKafkaConsumer_ProcessMessage_OrganizationAllowlistDisabled(t *testing.T) { 
- mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := dummyConsumer(mockStorage, false) - - err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) - helpers.FailOnError(t, err) -} - -func TestKafkaConsumer_ProcessMessageWithEmptyReport_OrganizationIsNotAllowed(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - brokerCfg := broker.Configuration{ - Address: "localhost:1234", - Topic: "topic", - Group: "group", - OrgAllowlist: mapset.NewSetWith(types.OrgID(123)), // in testdata, OrgID = 1 - OrgAllowlistEnabled: true, - } - mockConsumer := &consumer.KafkaConsumer{ - Configuration: brokerCfg, - Storage: mockStorage, - } - - err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) - helpers.FailOnError(t, err, "message have empty report and should not be processed") -} - -func TestKafkaConsumer_ProcessMessage_OrganizationIsNotAllowed(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - brokerCfg := broker.Configuration{ - Address: "localhost:1234", - Topic: "topic", - Group: "group", - OrgAllowlist: mapset.NewSetWith(types.OrgID(123)), // in testdata, OrgID = 1 - OrgAllowlistEnabled: true, - } - mockConsumer := &consumer.KafkaConsumer{ - Configuration: brokerCfg, - Storage: mockStorage, - } - - err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) - assert.EqualError(t, err, organizationIDNotInAllowList) -} - -func TestKafkaConsumer_ProcessMessageWithEmptyReport_OrganizationBadConfigIsNotAllowed(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - brokerCfg := broker.Configuration{ - Address: "localhost:1234", - Topic: "topic", - Group: "group", - OrgAllowlist: nil, - OrgAllowlistEnabled: true, - } - mockConsumer := &consumer.KafkaConsumer{ - Configuration: brokerCfg, - Storage: mockStorage, - } - - err := 
consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) - helpers.FailOnError(t, err, "message have empty report and should not be processed") -} - -func TestKafkaConsumer_ProcessMessage_OrganizationBadConfigIsNotAllowed(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - brokerCfg := broker.Configuration{ - Address: "localhost:1234", - Topic: "topic", - Group: "group", - OrgAllowlist: nil, - OrgAllowlistEnabled: true, - } - mockConsumer := &consumer.KafkaConsumer{ - Configuration: brokerCfg, - Storage: mockStorage, - } - - err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) - assert.EqualError(t, err, organizationIDNotInAllowList) -} - -func TestKafkaConsumer_ProcessMessage_MessageFromTheFuture(t *testing.T) { - buf := new(bytes.Buffer) - zerolog_log.Logger = zerolog.New(buf) - - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := &consumer.KafkaConsumer{ - Configuration: wrongBrokerCfg, - Storage: mockStorage, - } - - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - "LastChecked": "` + time.Now().Add(24*time.Hour).Format(time.RFC3339) + `" - }` - - err := consumerProcessMessage(mockConsumer, message) - helpers.FailOnError(t, err) - assert.Contains(t, buf.String(), "got a message from the future") -} - -func TestKafkaConsumer_ProcessMessage_MoreRecentReportAlreadyExists(t *testing.T) { - zerolog.SetGlobalLevel(zerolog.InfoLevel) - buf := new(bytes.Buffer) - zerolog_log.Logger = zerolog.New(buf) - - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := &consumer.KafkaConsumer{ - Configuration: wrongBrokerCfg, - Storage: mockStorage, - } - - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - 
"LastChecked": "` + time.Now().Format(time.RFC3339) + `" - }` - - err := consumerProcessMessage(mockConsumer, message) - helpers.FailOnError(t, err) - - message = `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `" - }` - - err = consumerProcessMessage(mockConsumer, message) - helpers.FailOnError(t, err) - - assert.Contains(t, buf.String(), "Skipping because a more recent report already exists for this cluster") -} - -func TestKafkaConsumer_ProcessMessage_MessageWithNoSchemaVersion(t *testing.T) { - buf := new(bytes.Buffer) - zerolog_log.Logger = zerolog.New(buf) - - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := &consumer.KafkaConsumer{ - Configuration: wrongBrokerCfg, - Storage: mockStorage, - } - - err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) - helpers.FailOnError(t, err) - assert.Contains(t, buf.String(), "\"level\":\"warn\"") - assert.Contains(t, buf.String(), "Received data with unexpected version") -} - -func TestKafkaConsumer_ProcessMessage_MessageWithUnexpectedSchemaVersion(t *testing.T) { - buf := new(bytes.Buffer) - zerolog_log.Logger = zerolog.New(buf) - - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := &consumer.KafkaConsumer{ - Configuration: wrongBrokerCfg, - Storage: mockStorage, - } - - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `", - "Version": ` + fmt.Sprintf("%d", types.SchemaVersion(3)) + ` - }` - - err := consumerProcessMessage(mockConsumer, message) - helpers.FailOnError(t, err) - assert.Contains(t, buf.String(), "\"level\":\"warn\"") - assert.Contains(t, buf.String(), 
"Received data with unexpected version") -} - -func TestKafkaConsumer_ProcessMessage_MessageWithExpectedSchemaVersion(t *testing.T) { - buf := new(bytes.Buffer) - zerolog_log.Logger = zerolog.New(buf) - - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - mockConsumer := &consumer.KafkaConsumer{ - Configuration: wrongBrokerCfg, - Storage: mockStorage, - } - - message := `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `", - "Version": ` + fmt.Sprintf("%d", types.SchemaVersion(1)) + ` - }` - - err := consumerProcessMessage(mockConsumer, message) - helpers.FailOnError(t, err) - - message = `{ - "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, - "ClusterName": "` + string(testdata.ClusterName) + `", - "Report":` + testReport + `, - "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `", - "Version": ` + fmt.Sprintf("%d", types.SchemaVersion(2)) + ` - }` - - err = consumerProcessMessage(mockConsumer, message) - helpers.FailOnError(t, err) - - assert.NotContains(t, buf.String(), "\"level\":\"warn\"") - assert.NotContains(t, buf.String(), "Received data with unexpected version") -} - -func TestKafkaConsumer_ConsumeClaim(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - kafkaConsumer := consumer.KafkaConsumer{ - Storage: mockStorage, - } - - mockConsumerGroupSession := &saramahelpers.MockConsumerGroupSession{} - mockConsumerGroupClaim := saramahelpers.NewMockConsumerGroupClaim(nil) - - err := kafkaConsumer.ConsumeClaim(mockConsumerGroupSession, mockConsumerGroupClaim) - helpers.FailOnError(t, err) -} - -func TestKafkaConsumer_ConsumeClaim_DBError(t *testing.T) { - buf := new(bytes.Buffer) - zerolog_log.Logger = zerolog.New(buf) - - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - closer() - - 
kafkaConsumer := consumer.KafkaConsumer{ - Storage: mockStorage, - } - - mockConsumerGroupSession := &saramahelpers.MockConsumerGroupSession{} - mockConsumerGroupClaim := saramahelpers.NewMockConsumerGroupClaim(nil) - - err := kafkaConsumer.ConsumeClaim(mockConsumerGroupSession, mockConsumerGroupClaim) - helpers.FailOnError(t, err) - - assert.Contains(t, buf.String(), "starting messages loop") -} - -func TestKafkaConsumer_ConsumeClaim_OKMessage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) - defer closer() - - kafkaConsumer := consumer.KafkaConsumer{ - Storage: mockStorage, - } - - mockConsumerGroupSession := &saramahelpers.MockConsumerGroupSession{} - mockConsumerGroupClaim := saramahelpers.NewMockConsumerGroupClaim([]*sarama.ConsumerMessage{ - saramahelpers.StringToSaramaConsumerMessage(testdata.ConsumerMessage), - }) - - err := kafkaConsumer.ConsumeClaim(mockConsumerGroupSession, mockConsumerGroupClaim) - helpers.FailOnError(t, err) -} - func TestKafkaConsumer_SetupCleanup(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() mockBroker := sarama.NewMockBroker(t, 0) @@ -1093,11 +179,11 @@ func TestKafkaConsumer_SetupCleanup(t *testing.T) { mockBroker.SetHandlerByMap(ira_helpers.GetHandlersMapForMockConsumer(t, mockBroker, testTopicName)) - mockConsumer, err := consumer.New(broker.Configuration{ - Address: mockBroker.Addr(), - Topic: testTopicName, - Enabled: true, - }, mockStorage) + mockConsumer, err := consumer.NewKafkaConsumer(broker.Configuration{ + Addresses: mockBroker.Addr(), + Topic: testTopicName, + Enabled: true, + }, mockStorage, nil) helpers.FailOnError(t, err) defer func() { @@ -1111,15 +197,24 @@ func TestKafkaConsumer_SetupCleanup(t *testing.T) { } func TestKafkaConsumer_NewDeadLetterProducer_Error(t *testing.T) { - producer.NewDeadLetterProducer = func(brokerCfg broker.Configuration) 
(*producer.DeadLetterProducer, error) { + // Backup original functions + originalNewDeadLetterProducer := producer.NewDeadLetterProducer + originalNewPayloadTrackerProducer := producer.NewPayloadTrackerProducer + defer func() { + producer.NewDeadLetterProducer = originalNewDeadLetterProducer + producer.NewPayloadTrackerProducer = originalNewPayloadTrackerProducer + }() + + // Override functions for testing + producer.NewDeadLetterProducer = func(_ broker.Configuration) (*producer.DeadLetterProducer, error) { return nil, errors.New("error happened") } - producer.NewPayloadTrackerProducer = func(brokerCfg broker.Configuration) (*producer.PayloadTrackerProducer, error) { + producer.NewPayloadTrackerProducer = func(_ broker.Configuration) (*producer.PayloadTrackerProducer, error) { return nil, nil } - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mockBroker := sarama.NewMockBroker(t, 0) @@ -1127,25 +222,34 @@ func TestKafkaConsumer_NewDeadLetterProducer_Error(t *testing.T) { mockBroker.SetHandlerByMap(ira_helpers.GetHandlersMapForMockConsumer(t, mockBroker, testTopicName)) - _, err := consumer.New(broker.Configuration{ - Address: mockBroker.Addr(), - Topic: testTopicName, - Enabled: true, - }, mockStorage) + _, err := consumer.NewKafkaConsumer(broker.Configuration{ + Addresses: mockBroker.Addr(), + Topic: testTopicName, + Enabled: true, + }, mockStorage, nil) assert.EqualError(t, err, "error happened") } func TestKafkaConsumer_NewPayloadTrackerProducer_Error(t *testing.T) { - producer.NewDeadLetterProducer = func(brokerCfg broker.Configuration) (*producer.DeadLetterProducer, error) { + // Backup original functions + originalNewDeadLetterProducer := producer.NewDeadLetterProducer + originalNewPayloadTrackerProducer := producer.NewPayloadTrackerProducer + defer func() { + producer.NewDeadLetterProducer = originalNewDeadLetterProducer + producer.NewPayloadTrackerProducer = 
originalNewPayloadTrackerProducer + }() + + // Override functions for testing + producer.NewDeadLetterProducer = func(_ broker.Configuration) (*producer.DeadLetterProducer, error) { return nil, nil } - producer.NewPayloadTrackerProducer = func(brokerCfg broker.Configuration) (*producer.PayloadTrackerProducer, error) { + producer.NewPayloadTrackerProducer = func(_ broker.Configuration) (*producer.PayloadTrackerProducer, error) { return nil, errors.New("error happened") } - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mockBroker := sarama.NewMockBroker(t, 0) @@ -1153,11 +257,11 @@ func TestKafkaConsumer_NewPayloadTrackerProducer_Error(t *testing.T) { mockBroker.SetHandlerByMap(ira_helpers.GetHandlersMapForMockConsumer(t, mockBroker, testTopicName)) - _, err := consumer.New(broker.Configuration{ - Address: mockBroker.Addr(), - Topic: testTopicName, - Enabled: true, - }, mockStorage) + _, err := consumer.NewKafkaConsumer(broker.Configuration{ + Addresses: mockBroker.Addr(), + Topic: testTopicName, + Enabled: true, + }, mockStorage, nil) assert.EqualError(t, err, "error happened") } diff --git a/consumer/dvo_processing.go b/consumer/dvo_processing.go new file mode 100644 index 000000000..fc7333dc0 --- /dev/null +++ b/consumer/dvo_processing.go @@ -0,0 +1,165 @@ +// Copyright 2023 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consumer + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/RedHatInsights/insights-results-aggregator/producer" + "github.com/RedHatInsights/insights-results-aggregator/storage" + "github.com/rs/zerolog/log" + + "github.com/RedHatInsights/insights-results-aggregator/types" + "github.com/Shopify/sarama" + "github.com/google/uuid" +) + +// DVORulesProcessor satisfies MessageProcessor interface +type DVORulesProcessor struct { +} + +func (DVORulesProcessor) deserializeMessage(messageValue []byte) (incomingMessage, error) { + var deserialized incomingMessage + + received, err := DecompressMessage(messageValue) + if err != nil { + return deserialized, err + } + + err = json.Unmarshal(received, &deserialized) + if err != nil { + return deserialized, err + } + if deserialized.Organization == nil { + return deserialized, errors.New("missing required attribute 'OrgID'") + } + if deserialized.ClusterName == nil { + return deserialized, errors.New("missing required attribute 'ClusterName'") + } + _, err = uuid.Parse(string(*deserialized.ClusterName)) + if err != nil { + return deserialized, errors.New("cluster name is not a UUID") + } + if deserialized.DvoMetrics == nil { + return deserialized, errors.New("missing required attribute 'Metrics'") + } + return deserialized, nil +} + +// parseMessage is the entry point for parsing the received message. 
+// It should be the first method called within ProcessMessage in order +// to convert the message into a struct that can be worked with +func (DVORulesProcessor) parseMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (incomingMessage, error) { + message, err := consumer.MessageProcessor.deserializeMessage(msg.Value) + if err != nil { + consumer.logMsgForFurtherAnalysis(msg) + logUnparsedMessageError(consumer, msg, "Error parsing message from Kafka", err) + return message, err + } + consumer.updatePayloadTracker(message.RequestID, time.Now(), message.Organization, message.Account, producer.StatusReceived) + + if err := consumer.MessageProcessor.shouldProcess(consumer, msg, &message); err != nil { + return message, err + } + err = parseDVOContent(&message) + if err != nil { + consumer.logReportStructureError(err, msg) + return message, err + } + + return message, nil +} + +func (processor DVORulesProcessor) processMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (types.RequestID, incomingMessage, error) { + return commonProcessMessage(consumer, msg, processor.storeInDB) +} + +func (DVORulesProcessor) shouldProcess(_ *KafkaConsumer, _ *sarama.ConsumerMessage, parsed *incomingMessage) error { + rawMetrics := *parsed.DvoMetrics + if len(rawMetrics) == 0 { + log.Debug().Msg("The 'Metrics' part of the JSON is empty. 
This message will be skipped") + return types.ErrEmptyReport + } + if _, found := rawMetrics["workload_recommendations"]; !found { + return fmt.Errorf("improper report structure, missing key with name 'workload_recommendations'") + } + return nil +} + +// parseDVOContent verifies the content of the DVO structure and parses it into +// the relevant parts of the incomingMessage structure +func parseDVOContent(message *incomingMessage) error { + return json.Unmarshal(*((*message.DvoMetrics)["workload_recommendations"]), &message.ParsedWorkloads) +} + +func (DVORulesProcessor) storeInDB(consumer *KafkaConsumer, msg *sarama.ConsumerMessage, message incomingMessage) (types.RequestID, incomingMessage, error) { + tStart := time.Now() + lastCheckedTime, err := consumer.retrieveLastCheckedTime(msg, &message) + if err != nil { + return message.RequestID, message, err + } + tTimeCheck := time.Now() + logDuration(tStart, tTimeCheck, msg.Offset, "time_check") + + reportAsBytes, err := json.Marshal(*message.DvoMetrics) + if err != nil { + logMessageError(consumer, msg, &message, "Error marshalling report", err) + return message.RequestID, message, err + } + + err = consumer.writeDVOReport(msg, message, reportAsBytes, lastCheckedTime) + if err != nil { + return message.RequestID, message, err + } + tStored := time.Now() + logDuration(tTimeCheck, tStored, msg.Offset, "db_store_report") + return message.RequestID, message, nil +} + +func (consumer *KafkaConsumer) writeDVOReport( + msg *sarama.ConsumerMessage, message incomingMessage, + reportAsBytes []byte, lastCheckedTime time.Time, +) error { + if dvoStorage, ok := consumer.Storage.(storage.DVORecommendationsStorage); ok { + // timestamp when the report is about to be written into database + storedAtTime := time.Now() + + err := dvoStorage.WriteReportForCluster( + *message.Organization, + *message.ClusterName, + types.ClusterReport(reportAsBytes), + message.ParsedWorkloads, + lastCheckedTime, + message.Metadata.GatheredAt, + 
storedAtTime, + message.RequestID, + ) + if err == types.ErrOldReport { + logMessageInfo(consumer, msg, &message, "Skipping because a more recent report already exists for this cluster") + return nil + } else if err != nil { + logMessageError(consumer, msg, &message, "Error writing report to database", err) + return err + } + logMessageDebug(consumer, msg, &message, "Stored report") + return nil + } + err := errors.New("report could not be stored") + logMessageError(consumer, msg, &message, unexpectedStorageType, err) + return err +} diff --git a/consumer/dvo_rules_consumer.go b/consumer/dvo_rules_consumer.go new file mode 100644 index 000000000..b75451405 --- /dev/null +++ b/consumer/dvo_rules_consumer.go @@ -0,0 +1,27 @@ +/* +Copyright © 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consumer + +import ( + "github.com/RedHatInsights/insights-results-aggregator/broker" + "github.com/RedHatInsights/insights-results-aggregator/storage" +) + +// NewDVORulesConsumer constructs new implementation of Consumer interface +func NewDVORulesConsumer(brokerCfg broker.Configuration, storage storage.DVORecommendationsStorage) (*KafkaConsumer, error) { + return NewKafkaConsumer(brokerCfg, storage, DVORulesProcessor{}) +} diff --git a/consumer/dvo_rules_consumer_test.go b/consumer/dvo_rules_consumer_test.go new file mode 100644 index 000000000..82471a42b --- /dev/null +++ b/consumer/dvo_rules_consumer_test.go @@ -0,0 +1,477 @@ +/* +Copyright © 2020, 2021, 2022, 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consumer_test + +import ( + "encoding/json" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/RedHatInsights/insights-results-aggregator/storage" + + "github.com/RedHatInsights/insights-operator-utils/tests/helpers" + "github.com/RedHatInsights/insights-results-aggregator-data/testdata" + ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" + "github.com/Shopify/sarama" + mapset "github.com/deckarep/golang-set" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + + "github.com/RedHatInsights/insights-results-aggregator/broker" + "github.com/RedHatInsights/insights-results-aggregator/consumer" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +func init() { + zerolog.SetGlobalLevel(zerolog.WarnLevel) +} + +var ( + dvoConsumer = consumer.KafkaConsumer{ + MessageProcessor: consumer.DVORulesProcessor{}, + } +) + +func createDVOConsumer(brokerCfg broker.Configuration, mockStorage storage.DVORecommendationsStorage) *consumer.KafkaConsumer { + return &consumer.KafkaConsumer{ + Configuration: brokerCfg, + Storage: mockStorage, + MessageProcessor: consumer.DVORulesProcessor{}, + } +} + +func dummyDVOConsumer(s storage.DVORecommendationsStorage, allowlist bool) consumer.Consumer { + brokerCfg := broker.Configuration{ + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", + } + if allowlist { + brokerCfg.OrgAllowlist = mapset.NewSetWith(types.OrgID(1)) + brokerCfg.OrgAllowlistEnabled = true + } else { + brokerCfg.OrgAllowlistEnabled = false + } + return createDVOConsumer(brokerCfg, s) +} + +func TestDVORulesConsumer_New(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + sarama.Logger = log.New(os.Stdout, saramaLogPrefix, log.LstdFlags) + + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + mockBroker := sarama.NewMockBroker(t, 0) + defer mockBroker.Close() + + 
mockBroker.SetHandlerByMap(ira_helpers.GetHandlersMapForMockConsumer(t, mockBroker, testTopicName)) + + mockConsumer, err := consumer.NewDVORulesConsumer(broker.Configuration{ + Addresses: mockBroker.Addr(), + Topic: testTopicName, + Enabled: true, + }, mockStorage) + helpers.FailOnError(t, err) + + err = mockConsumer.Close() + helpers.FailOnError(t, err) + }, testCaseTimeLimit) +} + +func TestDeserializeEmptyDVOMessage(t *testing.T) { + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte("")) + assert.EqualError(t, err, "unexpected end of JSON input") +} + +func TestDeserializeDVOMessageWithWrongContent(t *testing.T) { + const message = `{"this":"is", "not":"expected content"}` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing required attribute") +} + +func TestDeserializeDVOMessageWithImproperJSON(t *testing.T) { + const message = `"this_is_not_json_dude"` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError( + t, + err, + "json: cannot unmarshal string into Go value of type consumer.incomingMessage", + ) +} + +func TestDeserializeDVOMessageWithImproperMetrics(t *testing.T) { + consumerMessage := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Metrics": "this is not a JSON" + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(consumerMessage)) + assert.EqualError( + t, + err, + "json: cannot unmarshal string into Go struct field incomingMessage.Metrics of type consumer.DvoMetrics", + ) +} + +func 
TestDeserializeDVOProperMessage(t *testing.T) { + consumerMessage := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Metrics":` + testMetrics + ` + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + message, err := consumer.DeserializeMessage(&c, []byte(consumerMessage)) + helpers.FailOnError(t, err) + assert.Equal(t, types.OrgID(1), *message.Organization) + assert.Equal(t, testdata.ClusterName, *message.ClusterName) +} + +func TestDeserializeDVOMessageWrongClusterName(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "this is not a UUID" + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "cluster name is not a UUID") +} + +func TestDeserializeDVOMessageWithoutOrgID(t *testing.T) { + message := `{ + "ClusterName": "` + string(testdata.ClusterName) + `" + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'OrgID'") +} + +func TestDeserializeDVOMessageWithoutClusterName(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + ` + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'ClusterName'") +} + +func TestDeserializeDVOMessageWithoutMetrics(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `" + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + 
assert.EqualError(t, err, "missing required attribute 'Metrics'") +} + +func TestDeserializeDVOMessageWithEmptyReport(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": {} + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.Nil(t, err, "deserializeMessage should not return error for empty metrics") +} + +func TestDeserializeDVOMessageNullMetrics(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": null + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'Metrics'") +} + +func TestDeserializeCompressedDVOMessage(t *testing.T) { + consumerMessage := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Metrics": { + "this_is_not": "a_proper_format", + "but": "this should be deserialized properly" + } + }` + compressed := compressConsumerMessage([]byte(consumerMessage)) + c := consumer.KafkaConsumer{MessageProcessor: consumer.DVORulesProcessor{}} + message, err := consumer.DeserializeMessage(&c, compressed) + helpers.FailOnError(t, err) + assert.Equal(t, types.OrgID(1), *message.Organization) + assert.Equal(t, testdata.ClusterName, *message.ClusterName) +} + +func TestParseEmptyDVOMessage(t *testing.T) { + message := sarama.ConsumerMessage{} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "unexpected end of JSON input") +} + +func TestParseDVOMessageWithWrongContent(t *testing.T) { + message := sarama.ConsumerMessage{Value: []byte(`{"this":"is", "not":"expected 
content"}`)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'OrgID'") +} + +func TestParseProperDVOMessageWrongClusterName(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "this is not a UUID", + "Metrics": {} + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "cluster name is not a UUID") +} + +func TestParseDVOMessageWithoutOrgID(t *testing.T) { + data := `{ + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": {} + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'OrgID'") +} + +func TestParseDVOMessageWithoutClusterName(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "Metrics": {} + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'ClusterName'") +} + +func TestParseMessageWithoutMetrics(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `" + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'Metrics'") +} + +func TestParseDVOMessageEmptyMetrics(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": {} + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.Equal(t, types.ErrEmptyReport, err) +} + +func TestParseDVOMessageNullMetrics(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, 
+ "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": null + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'Metrics'") +} + +func TestParseDVOMessageWithImproperJSON(t *testing.T) { + message := sarama.ConsumerMessage{Value: []byte(`"this_is_not_json_dude"`)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "json: cannot unmarshal string into Go value of type consumer.incomingMessage") +} + +func TestParseDVOMessageWithImproperMetrics(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics": "this is not a JSON" + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&dvoConsumer, &message) + assert.EqualError(t, err, "json: cannot unmarshal string into Go struct field incomingMessage.Metrics of type consumer.DvoMetrics") +} + +func TestParseDVOMessageWithProperMetrics(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Metrics":` + testMetrics + ` + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + parsed, err := consumer.ParseMessage(&dvoConsumer, &message) + helpers.FailOnError(t, err) + assert.Equal(t, types.OrgID(1), *parsed.Organization) + assert.Equal(t, testdata.ClusterName, *parsed.ClusterName) + + var expectedDvoMetrics consumer.DvoMetrics + err = json.Unmarshal([]byte(testMetrics), &expectedDvoMetrics) + helpers.FailOnError(t, err) + assert.Equal(t, expectedDvoMetrics, *parsed.DvoMetrics) + + expectedWorkloads := []types.WorkloadRecommendation{ + { + ResponseID: "an_issue|DVO_AN_ISSUE", + Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + Key: "DVO_AN_ISSUE", + 
Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + "samples": []interface{}{ + map[string]interface{}{ + "namespace_uid": "NAMESPACE-UID-A", "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158", + }, + }, + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{ + { + Namespace: "namespace-name-A", + NamespaceUID: "NAMESPACE-UID-A", + Kind: "DaemonSet", + Name: "test-name-0099", + UID: "193a2099-1234-5678-916a-d570c9aac158", + }, + }, + }, + } + assert.EqualValues(t, expectedWorkloads, parsed.ParsedWorkloads) +} + +func TestProcessEmptyDVOMessage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + c := dummyDVOConsumer(mockStorage, true) + + message := sarama.ConsumerMessage{} + // message is empty -> nothing should be written into storage + err := c.HandleMessage(&message) + assert.EqualError(t, err, "unexpected end of JSON input") + + count, err := mockStorage.ReportsCount() + helpers.FailOnError(t, err) + + // no record should be written into database + assert.Equal( + t, + 0, + count, + "process message shouldn't write anything into the DB", + ) +} + +func TestProcessCorrectDVOMessage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + + defer closer() + + c := dummyDVOConsumer(mockStorage, true) + + message := sarama.ConsumerMessage{} + message.Value = []byte(messageReportWithDVOHits) + // message is correct -> one record should be written into storage + err := c.HandleMessage(&message) + helpers.FailOnError(t, err) + + count, err := mockStorage.ReportsCount() + helpers.FailOnError(t, err) + + // exactly one record should be written into database + assert.Equal(t, 1, count, "process message should write one record into DB") +} + +func TestProcessingEmptyMetricsMissingAttributesWithClosedStorage(t 
*testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + + mockConsumer := dummyDVOConsumer(mockStorage, true) + closer() + + err := consumerProcessMessage(mockConsumer, messageReportNoDVOMetrics) + helpers.FailOnError(t, err, "empty report should not be considered an error at HandleMessage level") +} + +func TestProcessingCorrectDVOMessageWithClosedStorage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + + mockConsumer := dummyDVOConsumer(mockStorage, true) + closer() + + err := consumerProcessMessage(mockConsumer, messageReportWithDVOHits) + assert.EqualError(t, err, "sql: database is closed") +} + +func TestProcessingDVOMessageWithWrongDateFormatReportNotEmpty(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + mockConsumer := dummyDVOConsumer(mockStorage, true) + + messageValue := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Metrics":` + testMetrics + `, + "LastChecked": "2020.01.23 16:15:59" + }` + err := consumerProcessMessage(mockConsumer, messageValue) + if _, ok := err.(*time.ParseError); err == nil || !ok { + t.Fatal(fmt.Errorf( + "expected time.ParseError error because date format is wrong. 
Got %+v", err, + )) + } +} + +func TestDVOKafkaConsumerMockOK(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + mockConsumer, closer := ira_helpers.MustGetMockDVOConsumerWithExpectedMessages( + t, + testTopicName, + testOrgAllowlist, + []string{messageReportWithDVOHits}, + ) + + go mockConsumer.Serve() + + // wait for message processing + ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) + + closer() + + assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) + assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) + }, testCaseTimeLimit) +} diff --git a/consumer/export_test.go b/consumer/export_test.go index beca7aec0..05b1cf730 100644 --- a/consumer/export_test.go +++ b/consumer/export_test.go @@ -16,11 +16,7 @@ limitations under the License. package consumer -import ( - "time" - - "github.com/Shopify/sarama" -) +import "github.com/Shopify/sarama" // Export for testing // @@ -32,23 +28,30 @@ import ( // https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd // to see why this trick is needed. var ( - DeserializeMessage = deserializeMessage - ParseReportContent = parseReportContent CheckReportStructure = checkReportStructure IsReportWithEmptyAttributes = isReportWithEmptyAttributes NumberOfExpectedKeysInReport = numberOfExpectedKeysInReport ExpectedKeysInReport = expectedKeysInReport ) -var ParseMessageTestStartTime = time.Now() - // Inc type is a trick to get golint to work for the ParseMessage defined below... 
type Inc struct {
	incomingMessage
}
 
-// ParseMessage reproduces the functionality of the private parseMessage function for testing
+// DeserializeMessage returns the result of the private MessageProcessor.deserializeMessage method
+func DeserializeMessage(consumer *KafkaConsumer, msg []byte) (Inc, error) {
+	incomingMessage, err := consumer.MessageProcessor.deserializeMessage(msg)
+	return Inc{incomingMessage}, err
+}
+
+// ParseMessage returns the result of the private MessageProcessor.parseMessage method
 func ParseMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (Inc, error) {
-	incomingMessage, err := consumer.parseMessage(msg, ParseMessageTestStartTime)
+	incomingMessage, err := consumer.MessageProcessor.parseMessage(consumer, msg)
 	return Inc{incomingMessage}, err
 }
+
+// ParseReportContent returns the result of the private parseReportContent function
+func ParseReportContent(message *Inc) error {
+	return parseReportContent(&message.incomingMessage)
+}
diff --git a/consumer/kafka_consumer.go b/consumer/kafka_consumer.go
new file mode 100644
index 000000000..bcfd45b5f
--- /dev/null
+++ b/consumer/kafka_consumer.go
@@ -0,0 +1,259 @@
+/*
+Copyright © 2020, 2021, 2022, 2023 Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consumer contains interface for any consumer that is able to
+// process messages. It also contains implementation of Kafka consumer.
+// +// It is expected that consumed messages are generated by ccx-data-pipeline +// based on OCP rules framework. The report generated by the framework are +// enhanced with more context information taken from different sources, like +// the organization ID, account number, unique cluster name, and the +// LastChecked timestamp (taken from the incoming Kafka record containing the +// URL to the archive). +// +// It is also expected that consumed messages contains one INFO rule hit that +// contains cluster version. That rule hit is produced by special rule used +// only in external data pipeline: +// "version_info|CLUSTER_VERSION_INFO" +package consumer + +import ( + "context" + "strings" + + "github.com/Shopify/sarama" + "github.com/rs/zerolog/log" + + "github.com/RedHatInsights/insights-results-aggregator/broker" + "github.com/RedHatInsights/insights-results-aggregator/producer" + "github.com/RedHatInsights/insights-results-aggregator/storage" +) + +// KafkaConsumer is an implementation of Consumer interface +// Example: +// +// KafkaConsumer, err := consumer.NewKafkaConsumer(brokerCfg, storage) +// +// if err != nil { +// panic(err) +// } +// +// KafkaConsumer.Serve() +// +// err := KafkaConsumer.Stop() +// +// if err != nil { +// panic(err) +// } +type KafkaConsumer struct { + Configuration broker.Configuration + ConsumerGroup sarama.ConsumerGroup + Storage storage.Storage + MessageProcessor MessageProcessor + numberOfSuccessfullyConsumedMessages uint64 + numberOfErrorsConsumingMessages uint64 + ready chan bool + cancel context.CancelFunc + payloadTrackerProducer *producer.PayloadTrackerProducer + deadLetterProducer *producer.DeadLetterProducer +} + +// DefaultSaramaConfig is a config which will be used by default +// here you can use specific version of a protocol for example +// useful for testing +var DefaultSaramaConfig *sarama.Config + +// NewKafkaConsumer constructs new implementation of Consumer interface +func NewKafkaConsumer(brokerCfg 
broker.Configuration, storage storage.Storage, processor MessageProcessor) (*KafkaConsumer, error) { + return NewKafkaConsumerWithSaramaConfig(brokerCfg, storage, DefaultSaramaConfig, processor) +} + +// NewKafkaConsumerWithSaramaConfig constructs new implementation of Consumer interface with custom sarama config +func NewKafkaConsumerWithSaramaConfig( + brokerCfg broker.Configuration, + storage storage.Storage, + saramaConfig *sarama.Config, + processor MessageProcessor, +) (*KafkaConsumer, error) { + var err error + + if saramaConfig == nil { + saramaConfig, err = broker.SaramaConfigFromBrokerConfig(brokerCfg) + if err != nil { + log.Error().Err(err).Msg("unable to create sarama configuration from current broker configuration") + return nil, err + } + } + + log.Info(). + Str("addresses", brokerCfg.Addresses). + Str("group", brokerCfg.Group). + Msg("New consumer group") + + consumerGroup, err := sarama.NewConsumerGroup(strings.Split(brokerCfg.Addresses, ","), brokerCfg.Group, saramaConfig) + if err != nil { + log.Error().Err(err).Msg("Unable to create consumer group") + return nil, err + } + log.Info().Msg("Consumer group has been created") + + log.Info().Msg("Constructing payload tracker producer") + payloadTrackerProducer, err := producer.NewPayloadTrackerProducer(brokerCfg) + if err != nil { + log.Error().Err(err).Msg("Unable to construct payload tracker producer") + return nil, err + } + if payloadTrackerProducer == nil { + log.Info().Msg("Payload tracker producer not configured") + } else { + log.Info().Msg("Payload tracker producer has been configured") + } + + log.Info().Msg("Constructing DLQ producer") + deadLetterProducer, err := producer.NewDeadLetterProducer(brokerCfg) + if err != nil { + log.Error().Err(err).Msg("Unable to construct dead letter producer") + return nil, err + } + if deadLetterProducer == nil { + log.Info().Msg("Dead letter producer not configured") + } else { + log.Info().Msg("Dead letter producer has been configured") + } + + consumer 
:= &KafkaConsumer{ + Configuration: brokerCfg, + ConsumerGroup: consumerGroup, + Storage: storage, + MessageProcessor: processor, + numberOfSuccessfullyConsumedMessages: 0, + numberOfErrorsConsumingMessages: 0, + ready: make(chan bool), + payloadTrackerProducer: payloadTrackerProducer, + deadLetterProducer: deadLetterProducer, + } + + return consumer, nil +} + +// Serve starts listening for messages and processing them. It blocks current thread. +func (consumer *KafkaConsumer) Serve() { + ctx, cancel := context.WithCancel(context.Background()) + consumer.cancel = cancel + + go func() { + for { + // `Consume` should be called inside an infinite loop, when a + // server-side rebalance happens, the consumer session will need to be + // recreated to get the new claims + if err := consumer.ConsumerGroup.Consume(ctx, []string{consumer.Configuration.Topic}, consumer); err != nil { + log.Fatal().Err(err).Msg("unable to recreate kafka session") + } + + // check if context was cancelled, signaling that the consumer should stop + if ctx.Err() != nil { + return + } + + log.Info().Msg("created new kafka session") + + consumer.ready = make(chan bool) + } + }() + + // Wait for the consumer to be set up + log.Info().Msg("waiting for consumer to become ready") + <-consumer.ready + log.Info().Msg("finished waiting for consumer to become ready") + + // Actual processing is done in goroutine created by sarama (see ConsumeClaim below) + log.Info().Msg("started serving consumer") + <-ctx.Done() + log.Info().Msg("context cancelled, exiting") + + cancel() +} + +// Setup is run at the beginning of a new session, before ConsumeClaim +func (consumer *KafkaConsumer) Setup(sarama.ConsumerGroupSession) error { + log.Info().Msg("new session has been setup") + // Mark the consumer as ready + close(consumer.ready) + return nil +} + +// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited +func (consumer *KafkaConsumer) Cleanup(sarama.ConsumerGroupSession) error { + 
log.Info().Msg("new session has been finished") + return nil +} + +// ConsumeClaim starts a consumer loop of ConsumerGroupClaim's Messages(). +func (consumer *KafkaConsumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + log.Info(). + Int64(offsetKey, claim.InitialOffset()). + Msg("starting messages loop") + + for message := range claim.Messages() { + err := consumer.HandleMessage(message) + if err != nil { + // already handled in HandleMessage, just log + log.Error().Err(err).Msg("Problem while handling the message") + } + session.MarkMessage(message, "") + } + + return nil +} + +// Close method closes all resources used by consumer +func (consumer *KafkaConsumer) Close() error { + if consumer.cancel != nil { + consumer.cancel() + } + + if consumer.ConsumerGroup != nil { + if err := consumer.ConsumerGroup.Close(); err != nil { + log.Error().Err(err).Msg("unable to close consumer group") + } + } + + if consumer.payloadTrackerProducer != nil { + if err := consumer.payloadTrackerProducer.Close(); err != nil { + log.Error().Err(err).Msg("unable to close payload tracker Kafka producer") + } + } + + if consumer.deadLetterProducer != nil { + if err := consumer.deadLetterProducer.Close(); err != nil { + log.Error().Err(err).Msg("unable to close dead letter Kafka producer") + } + } + + return nil +} + +// GetNumberOfSuccessfullyConsumedMessages returns number of consumed messages +// since creating KafkaConsumer obj +func (consumer *KafkaConsumer) GetNumberOfSuccessfullyConsumedMessages() uint64 { + return consumer.numberOfSuccessfullyConsumedMessages +} + +// GetNumberOfErrorsConsumingMessages returns number of errors during consuming messages +// since creating KafkaConsumer obj +func (consumer *KafkaConsumer) GetNumberOfErrorsConsumingMessages() uint64 { + return consumer.numberOfErrorsConsumingMessages +} diff --git a/consumer/logging.go b/consumer/logging.go index 0f5c045cd..20c544c44 100644 --- a/consumer/logging.go +++ 
b/consumer/logging.go @@ -19,6 +19,7 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -31,27 +32,11 @@ func printableRequestID(message *incomingMessage) string { } func logMessageDebug(consumer *KafkaConsumer, originalMessage *sarama.ConsumerMessage, parsedMessage *incomingMessage, event string) { - log.Debug(). - Int(offsetKey, int(originalMessage.Offset)). - Int(partitionKey, int(originalMessage.Partition)). - Str(topicKey, consumer.Configuration.Topic). - Int(organizationKey, int(*parsedMessage.Organization)). - Str(clusterKey, string(*parsedMessage.ClusterName)). - Int(versionKey, int(parsedMessage.Version)). - Str(requestIDKey, printableRequestID(parsedMessage)). - Msg(event) + fillEvent(log.Debug(), consumer, originalMessage, parsedMessage).Msg(event) } func logMessageInfo(consumer *KafkaConsumer, originalMessage *sarama.ConsumerMessage, parsedMessage *incomingMessage, event string) { - log.Info(). - Int(offsetKey, int(originalMessage.Offset)). - Int(partitionKey, int(originalMessage.Partition)). - Str(topicKey, consumer.Configuration.Topic). - Int(organizationKey, int(*parsedMessage.Organization)). - Str(clusterKey, string(*parsedMessage.ClusterName)). - Int(versionKey, int(parsedMessage.Version)). - Str(requestIDKey, printableRequestID(parsedMessage)). - Msg(event) + fillEvent(log.Info(), consumer, originalMessage, parsedMessage).Msg(event) } func logClusterInfo(message *incomingMessage) { @@ -84,28 +69,46 @@ func logUnparsedMessageError(consumer *KafkaConsumer, originalMessage *sarama.Co } func logMessageError(consumer *KafkaConsumer, originalMessage *sarama.ConsumerMessage, parsedMessage *incomingMessage, event string, err error) { - log.Error(). - Int(offsetKey, int(originalMessage.Offset)). - Str(topicKey, consumer.Configuration.Topic). - Int(organizationKey, int(*parsedMessage.Organization)). - Str(clusterKey, string(*parsedMessage.ClusterName)). - Int(versionKey, int(parsedMessage.Version)). 
- Err(err). - Msg(event) + fillEvent(log.Error(), consumer, originalMessage, parsedMessage).Err(err).Msg(event) } func logMessageWarning(consumer *KafkaConsumer, originalMessage *sarama.ConsumerMessage, parsedMessage *incomingMessage, event string) { - log.Warn(). - Int(offsetKey, int(originalMessage.Offset)). - Int(partitionKey, int(originalMessage.Partition)). - Str(topicKey, consumer.Configuration.Topic). - Int(organizationKey, int(*parsedMessage.Organization)). - Str(clusterKey, string(*parsedMessage.ClusterName)). - Int(versionKey, int(parsedMessage.Version)). - Msg(event) + fillEvent(log.Warn(), consumer, originalMessage, parsedMessage).Msg(event) } func logDuration(tStart, tEnd time.Time, offset int64, key string) { duration := tEnd.Sub(tStart) log.Debug().Int64(durationKey, duration.Microseconds()).Int64(offsetKey, offset).Msg(key) } + +func fillEvent(baseEvent *zerolog.Event, consumer *KafkaConsumer, originalMessage *sarama.ConsumerMessage, parsedMessage *incomingMessage) *zerolog.Event { + baseEvent = baseEvent.Str(topicKey, consumer.Configuration.Topic) + + // Check for nil pointers before raising the log error (CCXDEV-12426) + if originalMessage == nil { + log.Debug().Msg("originalMessage is nil") + } else { + baseEvent = baseEvent. + Int(offsetKey, int(originalMessage.Offset)). + Int(partitionKey, int(originalMessage.Partition)) + } + if parsedMessage == nil { + log.Debug().Msg("parsedMessage is nil") + } else { + baseEvent = baseEvent. + Int(versionKey, int(parsedMessage.Version)). 
+ Str(requestIDKey, printableRequestID(parsedMessage)) + if parsedMessage.Organization == nil { + log.Debug().Msg("*parsedMessage.Organization is nil") + } else { + baseEvent = baseEvent.Int(organizationKey, int(*parsedMessage.Organization)) + } + if parsedMessage.ClusterName == nil { + log.Debug().Msg("parsedMessage.ClusterName is nil") + } else { + baseEvent = baseEvent.Str(clusterKey, string(*parsedMessage.ClusterName)) + } + } + + return baseEvent +} diff --git a/consumer/message_decompression.go b/consumer/message_decompression.go new file mode 100644 index 000000000..08ad5a85d --- /dev/null +++ b/consumer/message_decompression.go @@ -0,0 +1,86 @@ +/* +Copyright © 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package consumer contains interface for any consumer that is able to +// process messages. It also contains implementation of Kafka consumer. +// +// It is expected that consumed messages are generated by ccx-data-pipeline +// based on OCP rules framework. The report generated by the framework are +// enhanced with more context information taken from different sources, like +// the organization ID, account number, unique cluster name, and the +// LastChecked timestamp (taken from the incoming Kafka record containing the +// URL to the archive). +// +// It is also expected that consumed messages contains one INFO rule hit that +// contains cluster version. 
That rule hit is produced by special rule used
+// only in external data pipeline:
+// "version_info|CLUSTER_VERSION_INFO"
+package consumer
+
+import (
+	"bytes"
+	"compress/gzip"
+	"io"
+
+	"github.com/rs/zerolog/log"
+)
+
+const (
+	// first magic byte of stream/file compressed by Gzip
+	gzipMagicNumber1 = 31
+	// second magic byte of stream/file compressed by Gzip
+	gzipMagicNumber2 = 139
+)
+
+// IsMessageInGzipFormat function checks if the format of the message is gzip.
+// It returns true if the message starts with the gzip magic header, false otherwise.
+func IsMessageInGzipFormat(messageValue []byte) bool {
+	if messageValue == nil {
+		return false
+	}
+	if len(messageValue) < 2 {
+		return false
+	}
+	// Checking for first 2 bytes in gzip stream which are 31 and 139
+	// see also https://en.wikipedia.org/wiki/Gzip
+	if messageValue[0] == gzipMagicNumber1 && messageValue[1] == gzipMagicNumber2 {
+		return true
+	}
+	return false
+}
+
+// DecompressMessage will try to decompress the message if the message is compressed
+// by using any supported method (GZIP at this moment)
+func DecompressMessage(messageValue []byte) ([]byte, error) {
+	if IsMessageInGzipFormat(messageValue) {
+		reader := bytes.NewReader(messageValue)
+		gzipReader, err := gzip.NewReader(reader)
+		if err != nil {
+			return nil, err
+		}
+		defer func(r *gzip.Reader) {
+			if err := r.Close(); err != nil {
+				log.Error().Err(err).Msgf("failed to close gzip reader: %s", err.Error())
+			}
+		}(gzipReader)
+		decompresed, err := io.ReadAll(gzipReader)
+		if err != nil {
+			return nil, err
+		}
+		return decompresed, err
+	}
+	return messageValue, nil
+}
diff --git a/consumer/message_decompression_test.go b/consumer/message_decompression_test.go
new file mode 100644
index 000000000..25ff5376a
--- /dev/null
+++ b/consumer/message_decompression_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright © 2023 Red Hat, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consumer_test + +import ( + "bytes" + "compress/gzip" + "testing" + + "github.com/RedHatInsights/insights-results-aggregator/consumer" + "github.com/stretchr/testify/assert" +) + +func TestIsMessageInGzipFormatForEmptyMessage(t *testing.T) { + message := []byte{} + result := consumer.IsMessageInGzipFormat(message) + assert.False(t, result, "Improper message format detection for empty message") +} + +func TestIsMessageInGzipFormatForShortMessage(t *testing.T) { + message := []byte{31} + result := consumer.IsMessageInGzipFormat(message) + assert.False(t, result, "Improper message format detection for too short message") +} + +func TestIsMessageInGzipFormatForMessageWithExpectedHeader(t *testing.T) { + message := []byte{31, 139} + result := consumer.IsMessageInGzipFormat(message) + assert.True(t, result, "Improper message format detection for message with right header") +} + +func TestIsMessageInGzipFormatForCompressedMessage(t *testing.T) { + original := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + compressed := compressConsumerMessage(original) + + result := consumer.IsMessageInGzipFormat(compressed) + assert.True(t, result, "Improper message format detection for compressed message") +} + +func TestDecompressEmptyMessage(t *testing.T) { + message := []byte{} + decompressed, err := consumer.DecompressMessage(message) + assert.NoError(t, err, "No error is expected") + assert.Equal(t, message, decompressed, "Message should not be decompressed") +} + +func TestDecompressNonCompressedMessage(t *testing.T) { + message := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + 
decompressed, err := consumer.DecompressMessage(message)
+	assert.NoError(t, err, "No error is expected")
+	assert.Equal(t, message, decompressed, "Message should not be decompressed")
+}
+
+// compressConsumerMessage gzip-compresses the given bytes; panics on error (test helper)
+func compressConsumerMessage(original []byte) []byte {
+	compressed := new(bytes.Buffer)
+	gzipWriter := gzip.NewWriter(compressed)
+	_, err := gzipWriter.Write(original)
+	if err != nil {
+		panic(err)
+	}
+	err = gzipWriter.Flush()
+	if err != nil {
+		panic(err)
+	}
+	err = gzipWriter.Close()
+	if err != nil {
+		panic(err)
+	}
+	return compressed.Bytes()
+}
+
+func TestDecompressCompressedMessage(t *testing.T) {
+	original := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+	compressed := compressConsumerMessage(original)
+
+	decompressed, err := consumer.DecompressMessage(compressed)
+	assert.NoError(t, err, "No error is expected")
+	assert.Equal(t, original, decompressed, "Message is not decompressed properly")
+}
diff --git a/consumer/ocp_processing.go b/consumer/ocp_processing.go
new file mode 100644
index 000000000..3e6eb398b
--- /dev/null
+++ b/consumer/ocp_processing.go
@@ -0,0 +1,325 @@
+// Copyright 2020, 2021, 2022, 2023 Red Hat, Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package consumer + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/RedHatInsights/insights-results-aggregator/storage" + + "github.com/RedHatInsights/insights-results-aggregator/producer" + "github.com/RedHatInsights/insights-results-aggregator/types" + "github.com/Shopify/sarama" + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +// OCPRulesProcessor satisfies MessageProcessor interface +type OCPRulesProcessor struct { +} + +// deserializeMessage tries to unmarshall the received message +// and read all required attributes from it +func (OCPRulesProcessor) deserializeMessage(messageValue []byte) (incomingMessage, error) { + var deserialized incomingMessage + + err := json.Unmarshal(messageValue, &deserialized) + if err != nil { + return deserialized, err + } + + if deserialized.Organization == nil { + return deserialized, errors.New("missing required attribute 'OrgID'") + } + if deserialized.ClusterName == nil { + return deserialized, errors.New("missing required attribute 'ClusterName'") + } + if deserialized.Report == nil { + return deserialized, errors.New("missing required attribute 'Report'") + } + + _, err = uuid.Parse(string(*deserialized.ClusterName)) + + if err != nil { + return deserialized, errors.New("cluster name is not a UUID") + } + return deserialized, nil +} + +func (consumer *KafkaConsumer) writeOCPReport( + msg *sarama.ConsumerMessage, message incomingMessage, + reportAsBytes []byte, lastCheckedTime time.Time, +) error { + if ocpStorage, ok := consumer.Storage.(storage.OCPRecommendationsStorage); ok { + // timestamp when the report is about to be written into database + storedAtTime := time.Now() + + err := ocpStorage.WriteReportForCluster( + *message.Organization, + *message.ClusterName, + types.ClusterReport(reportAsBytes), + message.ParsedHits, + lastCheckedTime, + message.Metadata.GatheredAt, + storedAtTime, + message.RequestID, + ) + if err == types.ErrOldReport { + logMessageInfo(consumer, msg, 
&message, "Skipping because a more recent report already exists for this cluster") + return nil + } else if err != nil { + logMessageError(consumer, msg, &message, "Error writing report to database", err) + return err + } + logMessageDebug(consumer, msg, &message, "Stored report") + return nil + } + err := errors.New("report could not be stored") + logMessageError(consumer, msg, &message, unexpectedStorageType, err) + return err +} + +func (consumer *KafkaConsumer) writeRecommendations( + msg *sarama.ConsumerMessage, message incomingMessage, reportAsBytes []byte, +) (time.Time, error) { + if ocpStorage, ok := consumer.Storage.(storage.OCPRecommendationsStorage); ok { + err := ocpStorage.WriteRecommendationsForCluster( + *message.Organization, + *message.ClusterName, + types.ClusterReport(reportAsBytes), + types.Timestamp(time.Now().UTC().Format(time.RFC3339)), + ) + if err != nil { + logMessageError(consumer, msg, &message, "Error writing recommendations to database", err) + return time.Time{}, err + } + tStored := time.Now() + logMessageDebug(consumer, msg, &message, "Stored recommendations") + return tStored, nil + } + logMessageError(consumer, msg, &message, unexpectedStorageType, errors.New("recommendation could not be stored")) + return time.Time{}, nil +} + +func (consumer *KafkaConsumer) writeInfoReport( + msg *sarama.ConsumerMessage, message incomingMessage, infoStoredAtTime time.Time, +) error { + // it is expected that message.ParsedInfo contains at least one item: + // result from special INFO rule containing cluster version that is + // used just in external data pipeline + if ocpStorage, ok := consumer.Storage.(storage.OCPRecommendationsStorage); ok { + err := ocpStorage.WriteReportInfoForCluster( + *message.Organization, + *message.ClusterName, + message.ParsedInfo, + infoStoredAtTime, + ) + if errors.Is(err, types.ErrOldReport) { + logMessageInfo(consumer, msg, &message, "Skipping because a more recent info report already exists for this cluster") + 
return nil + } else if err != nil { + logMessageError(consumer, msg, &message, "Error writing info report to database", err) + return err + } + logMessageInfo(consumer, msg, &message, "Stored info report") + return nil + } + logMessageError(consumer, msg, &message, unexpectedStorageType, errors.New("info report could not be stored")) + return nil +} + +// processMessage processes an incoming message +func (processor OCPRulesProcessor) processMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (types.RequestID, incomingMessage, error) { + return commonProcessMessage(consumer, msg, processor.storeInDB) +} + +func (OCPRulesProcessor) storeInDB(consumer *KafkaConsumer, msg *sarama.ConsumerMessage, message incomingMessage) (types.RequestID, incomingMessage, error) { + tStart := time.Now() + lastCheckedTime, err := consumer.retrieveLastCheckedTime(msg, &message) + if err != nil { + return message.RequestID, message, err + } + tTimeCheck := time.Now() + logDuration(tStart, tTimeCheck, msg.Offset, "time_check") + + reportAsBytes, err := json.Marshal(*message.Report) + if err != nil { + logMessageError(consumer, msg, &message, "Error marshalling report", err) + return message.RequestID, message, err + } + + err = consumer.writeOCPReport(msg, message, reportAsBytes, lastCheckedTime) + if err != nil { + return message.RequestID, message, err + } + tStored := time.Now() + logDuration(tTimeCheck, tStored, msg.Offset, "db_store_report") + + tRecommendationsStored, err := consumer.writeRecommendations(msg, message, reportAsBytes) + if err != nil { + return message.RequestID, message, err + } + logDuration(tStored, tRecommendationsStored, msg.Offset, "db_store_recommendations") + + // rule hits has been stored into database - time to log all these great info + logClusterInfo(&message) + + infoStoredAtTime := time.Now() + if err := consumer.writeInfoReport(msg, message, infoStoredAtTime); err != nil { + return message.RequestID, message, err + } + infoStored := time.Now() 
+ logDuration(infoStoredAtTime, infoStored, msg.Offset, "db_store_info_report") + + return message.RequestID, message, nil +} + +// shouldProcess determines if a parsed message should be processed further +func (OCPRulesProcessor) shouldProcess(consumer *KafkaConsumer, consumed *sarama.ConsumerMessage, parsed *incomingMessage) error { + err := checkReportStructure(*parsed.Report) + if err != nil { + consumer.logReportStructureError(err, consumed) + return err + } + return nil +} + +func verifySystemAttributeIsEmpty(r Report) bool { + var s system + if err := json.Unmarshal(*r[reportAttributeSystem], &s); err != nil { + return false + } + if s.Hostname != "" { + return false + } + return true +} + +// isReportWithEmptyAttributes checks if the report is empty, or if the attributes +// expected in the report, minus the analysis_metadata, are empty. +// If this function returns true, this report will not be processed further as it is +// PROBABLY the result of an archive that was not processed by insights-core. 
+// see https://github.com/RedHatInsights/insights-results-aggregator/issues/1834 +func isReportWithEmptyAttributes(r Report) bool { + // Create attribute checkers for each attribute + for attr, attrData := range r { + // we don't care about the analysis_metadata attribute + if attr == reportAttributeMetadata { + continue + } + // special handling for the system attribute, as it comes with data when empty + if attr == reportAttributeSystem { + if !verifySystemAttributeIsEmpty(r) { + return false + } + continue + } + // Check if this attribute of the report is empty + checker := JSONAttributeChecker{data: *attrData} + if !checker.IsEmpty() { + return false + } + } + return true +} + +// checkReportStructure tests if the report has correct structure +func checkReportStructure(r Report) error { + // the structure is not well-defined yet, so all we should do is to check if all keys are there + + // 'skips' and 'info' keys are now optional, we should not expect them anymore: + // https://github.com/RedHatInsights/insights-results-aggregator/issues/1206 + keysNotFound := make([]string, 0, numberOfExpectedKeysInReport) + keysFound := 0 + // check if the structure contains all expected keys + for _, expectedKey := range expectedKeysInReport { + _, found := r[expectedKey] + if !found { + keysNotFound = append(keysNotFound, expectedKey) + } else { + keysFound++ + } + } + + if keysFound == numberOfExpectedKeysInReport { + return nil + } + + // empty reports mean that this message should not be processed further + isEmpty := len(r) == 0 || isReportWithEmptyAttributes(r) + if isEmpty { + log.Debug().Msg("Empty report or report with only empty attributes. 
Processing of this message will be skipped.") + return types.ErrEmptyReport + } + + // report is not empty, and some keys have not been found -> malformed + if len(keysNotFound) != 0 { + return fmt.Errorf("improper report structure, missing key(s) with name '%v'", keysNotFound) + } + + return nil +} + +// parseReportContent verifies the content of the Report structure and parses it into +// the relevant parts of the incomingMessage structure +func parseReportContent(message *incomingMessage) error { + err := json.Unmarshal(*((*message.Report)[reportAttributeReports]), &message.ParsedHits) + if err != nil { + return err + } + + // with support for Hypershift-enabled clusters, the info attribute became optional + _, infoExists := (*message.Report)[reportAttributeInfo] + if !infoExists { + log.Debug().Msgf("%s key does not exist in the JSON object", reportAttributeInfo) + return nil + } + + err = json.Unmarshal(*((*message.Report)[reportAttributeInfo]), &message.ParsedInfo) + if err != nil { + return err + } + + return nil +} + +// parseMessage is the entry point for parsing the received message. 
+// It should be the first method called within ProcessMessage in order +// to convert the message into a struct that can be worked with +func (OCPRulesProcessor) parseMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (incomingMessage, error) { + message, err := consumer.MessageProcessor.deserializeMessage(msg.Value) + if err != nil { + consumer.logMsgForFurtherAnalysis(msg) + logUnparsedMessageError(consumer, msg, "Error parsing message from Kafka", err) + return message, err + } + + consumer.updatePayloadTracker(message.RequestID, time.Now(), message.Organization, message.Account, producer.StatusReceived) + + if err := consumer.MessageProcessor.shouldProcess(consumer, msg, &message); err != nil { + return message, err + } + + err = parseReportContent(&message) + if err != nil { + consumer.logReportStructureError(err, msg) + return message, err + } + + return message, nil +} diff --git a/consumer/ocp_rules_consumer.go b/consumer/ocp_rules_consumer.go new file mode 100644 index 000000000..aca21e969 --- /dev/null +++ b/consumer/ocp_rules_consumer.go @@ -0,0 +1,41 @@ +/* +Copyright © 2020, 2021, 2022, 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package consumer contains interface for any consumer that is able to +// process messages. It also contains implementation of Kafka consumer. +// +// It is expected that consumed messages are generated by ccx-data-pipeline +// based on OCP rules framework. 
The report generated by the framework are +// enhanced with more context information taken from different sources, like +// the organization ID, account number, unique cluster name, and the +// LastChecked timestamp (taken from the incoming Kafka record containing the +// URL to the archive). +// +// It is also expected that consumed messages contains one INFO rule hit that +// contains cluster version. That rule hit is produced by special rule used +// only in external data pipeline: +// "version_info|CLUSTER_VERSION_INFO" +package consumer + +import ( + "github.com/RedHatInsights/insights-results-aggregator/broker" + "github.com/RedHatInsights/insights-results-aggregator/storage" +) + +// NewOCPRulesConsumer constructs new implementation of Consumer interface +func NewOCPRulesConsumer(brokerCfg broker.Configuration, storage storage.OCPRecommendationsStorage) (*KafkaConsumer, error) { + return NewKafkaConsumer(brokerCfg, storage, OCPRulesProcessor{}) +} diff --git a/consumer/ocp_rules_consumer_test.go b/consumer/ocp_rules_consumer_test.go new file mode 100644 index 000000000..e3290b14b --- /dev/null +++ b/consumer/ocp_rules_consumer_test.go @@ -0,0 +1,999 @@ +/* +Copyright © 2020, 2021, 2022, 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consumer_test + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/RedHatInsights/insights-operator-utils/tests/saramahelpers" + "github.com/RedHatInsights/insights-results-aggregator/storage" + + ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" + zerolog_log "github.com/rs/zerolog/log" + + "github.com/RedHatInsights/insights-operator-utils/tests/helpers" + "github.com/RedHatInsights/insights-results-aggregator-data/testdata" + "github.com/Shopify/sarama" + mapset "github.com/deckarep/golang-set" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + + "github.com/RedHatInsights/insights-results-aggregator/broker" + "github.com/RedHatInsights/insights-results-aggregator/consumer" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +func init() { + zerolog.SetGlobalLevel(zerolog.WarnLevel) +} + +var ( + ocpConsumer = consumer.KafkaConsumer{ + MessageProcessor: consumer.OCPRulesProcessor{}, + } +) + +func createOCPConsumer(brokerCfg broker.Configuration, mockStorage storage.OCPRecommendationsStorage) *consumer.KafkaConsumer { + return &consumer.KafkaConsumer{ + Configuration: brokerCfg, + Storage: mockStorage, + MessageProcessor: consumer.OCPRulesProcessor{}, + } +} + +func dummyOCPConsumer(s storage.OCPRecommendationsStorage, allowlist bool) consumer.Consumer { + brokerCfg := broker.Configuration{ + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", + } + if allowlist { + brokerCfg.OrgAllowlist = mapset.NewSetWith(types.OrgID(1)) + brokerCfg.OrgAllowlistEnabled = true + } else { + brokerCfg.OrgAllowlistEnabled = false + } + return createOCPConsumer(brokerCfg, s) +} + +func TestOCPRulesConsumer_New(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + sarama.Logger = log.New(os.Stdout, saramaLogPrefix, log.LstdFlags) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + 
mockBroker := sarama.NewMockBroker(t, 0) + defer mockBroker.Close() + + mockBroker.SetHandlerByMap(ira_helpers.GetHandlersMapForMockConsumer(t, mockBroker, testTopicName)) + + mockConsumer, err := consumer.NewOCPRulesConsumer(broker.Configuration{ + Addresses: mockBroker.Addr(), + Topic: testTopicName, + Enabled: true, + }, mockStorage) + helpers.FailOnError(t, err) + + err = mockConsumer.Close() + helpers.FailOnError(t, err) + }, testCaseTimeLimit) +} + +func TestDeserializeEmptyMessage(t *testing.T) { + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte("")) + assert.EqualError(t, err, "unexpected end of JSON input") +} + +func TestDeserializeMessageWithWrongContent(t *testing.T) { + const message = `{"this":"is", "not":"expected content"}` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.Error(t, err) + assert.Contains(t, err.Error(), "missing required attribute") +} + +func TestDeserializeMessageWithImproperJSON(t *testing.T) { + const message = `"this_is_not_json_dude"` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError( + t, + err, + "json: cannot unmarshal string into Go value of type consumer.incomingMessage", + ) +} + +func TestDeserializeMessageWithImproperReport(t *testing.T) { + consumerMessage := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Report": { + "system": { + "metadata": {}, + "hostname": null + }, + "reports": "blablablabla", + "fingerprints": [], + "skips": [], + "info": [] + } + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + message, err := consumer.DeserializeMessage(&c, 
[]byte(consumerMessage)) + helpers.FailOnError(t, err) + assert.Equal(t, types.OrgID(1), *message.Organization) + assert.Equal(t, testdata.ClusterName, *message.ClusterName) +} + +func TestDeserializeProperMessage(t *testing.T) { + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + message, err := consumer.DeserializeMessage(&c, []byte(testdata.ConsumerMessage)) + helpers.FailOnError(t, err) + assert.Equal(t, types.OrgID(1), *message.Organization) + assert.Equal(t, testdata.ClusterName, *message.ClusterName) +} + +func TestDeserializeMessageWrongClusterName(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "this is not a UUID", + "Report": ` + testdata.ConsumerReport + ` + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "cluster name is not a UUID") +} + +func TestDeserializeMessageWithoutOrgID(t *testing.T) { + message := `{ + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report": ` + testdata.ConsumerReport + ` + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'OrgID'") +} + +func TestDeserializeMessageWithoutClusterName(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "Report": ` + testdata.ConsumerReport + ` + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'ClusterName'") +} + +func TestDeserializeMessageWithoutReport(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `" + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} 
+ _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'Report'") +} + +func TestDeserializeMessageWithEmptyReport(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report": {} + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.Nil(t, err, "deserializeMessage should not return error for empty report") +} + +func TestDeserializeMessageNullReport(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report": null + }` + c := consumer.KafkaConsumer{MessageProcessor: consumer.OCPRulesProcessor{}} + _, err := consumer.DeserializeMessage(&c, []byte(message)) + assert.EqualError(t, err, "missing required attribute 'Report'") +} + +func TestIsReportWithEmptyAttributesAllEmpty(t *testing.T) { + r := consumer.Report{ + "system": unmarshall(`{"metadata": {}, "hostname": null}`), + "reports": unmarshall("[]"), + "fingerprints": unmarshall("[]"), + "skips": unmarshall("[]"), + "info": unmarshall("[]"), + } + isEmpty := consumer.IsReportWithEmptyAttributes(r) + assert.True(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = true for this report") +} + +func TestIsReportWithEmptyAttributesEmptyReport(t *testing.T) { + r := consumer.Report{} + isEmpty := consumer.IsReportWithEmptyAttributes(r) + assert.True(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = true for this report") +} + +func TestIsReportWithEmptyAttributesSystemDataIsPresent(t *testing.T) { + r := consumer.Report{ + "system": unmarshall(`{"metadata": {}, "hostname": "a_hostname_that_can_be_unmarshalled"}`), + "reports": unmarshall("[]"), + "fingerprints": unmarshall("[]"), + "skips": unmarshall("[]"), + "info": unmarshall("[]"), + } + isEmpty := 
consumer.IsReportWithEmptyAttributes(r) + assert.False(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = false for this report") +} + +// Additional existence check have been added to avoid the possibility of +// panicking due to a nil pointer exception +func TestIsReportWithEmptyAttributesLessAttributes(t *testing.T) { + r := consumer.Report{ + "system": unmarshall(`{"metadata": {}, "hostname": "a_hostname_that_can_be_unmarshalled"}`), + "reports": unmarshall("[]"), + "fingerprints": unmarshall("[]"), + } + isEmpty := consumer.IsReportWithEmptyAttributes(r) + assert.False(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = false for this report") +} + +func TestIsReportWithEmptyAttributesInfoIsNotPresent(t *testing.T) { + r := consumer.Report{ + "system": unmarshall(`{"metadata": {}, "hostname": null}`), + "reports": unmarshall("[]"), + "fingerprints": unmarshall("[]"), + "skips": unmarshall("[]"), + } + isEmpty := consumer.IsReportWithEmptyAttributes(r) + assert.True(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = true for this report") +} + +func TestIsReportWithEmptyAttributesReportsIsPresent(t *testing.T) { + reportItems := `[ + { + "component": "test.rule", + "key": "test.key", + "details": ` + helpers.ToJSONString(testdata.Rule1ExtraData) + ` + } + ]` + r := consumer.Report{ + "system": unmarshall(`{"metadata": {}, "hostname": null}`), + "reports": unmarshall(reportItems), + "fingerprints": unmarshall("[]"), + "skips": unmarshall("[]"), + } + isEmpty := consumer.IsReportWithEmptyAttributes(r) + assert.False(t, isEmpty, "IsReportWithEmptyAttributes should return isEmpty = false for this report") +} + +func TestCheckReportStructureEmptyReport(t *testing.T) { + report := consumer.Report{} + + err := consumer.CheckReportStructure(report) + assert.EqualError(t, err, "empty report found in deserialized message") +} + +func TestCheckReportStructureReportWithAllAttributesPresentAndEmpty(t *testing.T) { + report := 
consumer.Report{ + "fingerprints": unmarshall("[]"), + "info": unmarshall("[]"), + "reports": unmarshall("[]"), + "skips": unmarshall("[]"), + "system": unmarshall("{}"), + } + err := consumer.CheckReportStructure(report) + helpers.FailOnError(t, err, "empty report with all expected attributes present should be processed") +} + +func TestCheckReportStructureReportWithAnalysisMetadata(t *testing.T) { + report := consumer.Report{ + "system": unmarshall(`{"metadata": {}, "hostname": null}`), + "fingerprints": unmarshall("[]"), + "analysis_metadata": unmarshall(`{"start": "2023-09-11T18:33:14.527845+00:00", "finish": "2023-09-11T18:33:15.632777+00:00"}`), + } + err := consumer.CheckReportStructure(report) + assert.EqualError(t, err, "empty report found in deserialized message") +} + +// If some attributes are missing, but all the present attributes are empty, we just +// skip the processing of the message. +func TestCheckReportStructureReportWithEmptyAndMissingAttributes(t *testing.T) { + report := consumer.Report{ + "fingerprints": unmarshall("[]"), + "reports": unmarshall("[]"), + "skips": unmarshall("[]"), + } + err := consumer.CheckReportStructure(report) + assert.EqualError(t, err, "empty report found in deserialized message") +} + +func TestCheckReportStructureReportWithItems(t *testing.T) { + report := consumer.Report{ + "fingerprints": unmarshall("[]"), + "reports": unmarshall(string(testdata.Report2Rules)), + "skips": unmarshall("[]"), + "system": unmarshall(`{"metadata": {},"hostname": null}`), + } + err := consumer.CheckReportStructure(report) + assert.Nil(t, err, "checkReportStructure should return err = nil for empty reports") +} + +func TestParseReportContentEmptyReportsAttribute(t *testing.T) { + deserialized, err := consumer.DeserializeMessage(&ocpConsumer, []byte(testdata.ConsumerMessage)) + assert.Nil(t, err, "deserializeMessage should not return error for this message") + + err = consumer.ParseReportContent(&deserialized) + assert.Nil(t, err, 
"ParseReportContent should not return nil for empty report") +} + +func TestParseReportContentValidReport(t *testing.T) { + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + string(testdata.Report2Rules) + ` + }` + + deserialized, err := consumer.DeserializeMessage(&ocpConsumer, []byte(message)) + assert.Nil(t, err, "deserializeMessage should not return error for this message") + + err = consumer.ParseReportContent(&deserialized) + assert.Nil(t, err, "ParseReportContent should return nil for reports with proper structure") +} + +func TestParseEmptyMessage(t *testing.T) { + message := sarama.ConsumerMessage{} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "unexpected end of JSON input") +} + +func TestParseMessageWithWrongContent(t *testing.T) { + message := sarama.ConsumerMessage{Value: []byte(`{"this":"is", "not":"expected content"}`)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'OrgID'") +} + +func TestParseProperMessageWrongClusterName(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "this is not a UUID", + "Report": ` + testdata.ConsumerReport + ` + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "cluster name is not a UUID") +} + +func TestParseMessageWithoutOrgID(t *testing.T) { + data := `{ + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report": ` + testdata.ConsumerReport + ` + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'OrgID'") +} + +func TestParseMessageWithoutClusterName(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "Report": ` + testdata.ConsumerReport 
+ ` + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'ClusterName'") +} + +func TestParseMessageWithoutReport(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `" + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'Report'") +} + +func TestParseMessageEmptyReport(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report": {} + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "empty report found in deserialized message") +} +func TestParseMessageNullReport(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report": null + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "missing required attribute 'Report'") +} + +func TestParseMessageWithImproperJSON(t *testing.T) { + message := sarama.ConsumerMessage{Value: []byte(`"this_is_not_json_dude"`)} + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "json: cannot unmarshal string into Go value of type consumer.incomingMessage") +} + +func TestParseMessageWithImproperReport(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Report": { + "system": { + "metadata": {}, + "hostname": null + }, + "reports": "blablablabla", + "fingerprints": [], + "skips": 
[], + "info": [] + } + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + + _, err := consumer.ParseMessage(&ocpConsumer, &message) + assert.EqualError(t, err, "json: cannot unmarshal string into Go value of type []types.ReportItem") +} + +func TestParseProperMessageReportWithEmptyAttributes(t *testing.T) { + message := sarama.ConsumerMessage{Value: []byte(testdata.ConsumerMessage)} + parsed, err := consumer.ParseMessage(&ocpConsumer, &message) + helpers.FailOnError(t, err, "empty report with all expected attributes present should be processed") + + assert.Equal(t, types.OrgID(1), *parsed.Organization) + assert.Equal(t, testdata.ClusterName, *parsed.ClusterName) + + var expectedReport consumer.Report + err = json.Unmarshal([]byte(testdata.ConsumerReport), &expectedReport) + helpers.FailOnError(t, err) + assert.Equal(t, expectedReport, *parsed.Report) + assert.EqualValues(t, []types.ReportItem{}, parsed.ParsedHits) +} + +func TestParseProperMessageWithInfoReport(t *testing.T) { + // this report contains just one rule hit: from special INFO rule used + // just in external data pipeline + consumerReport := `{ + "fingerprints": [], + "reports": [], + "skips": [], + "system": {}, + "info": [ + { + "info_id": "version_info|CLUSTER_VERSION_INFO", + "component": "ccx_rules_processing.version_info.report", + "type": "info", + "key": "CLUSTER_VERSION_INFO", + "details": { + "version": "4.9", + "type": "info", + "info_key": "CLUSTER_VERSION_INFO" + }, + "tags": [], + "links": {} + } + ] + }` + message := sarama.ConsumerMessage{Value: []byte(createConsumerMessage(consumerReport))} + parsed, err := consumer.ParseMessage(&ocpConsumer, &message) + helpers.FailOnError(t, err, "this message is valid and should be processed") + assert.Equal(t, types.OrgID(1), *parsed.Organization) + assert.Equal(t, testdata.ClusterName, *parsed.ClusterName) + + var expectedReport consumer.Report + err = json.Unmarshal([]byte(consumerReport), &expectedReport) + helpers.FailOnError(t, err) 
+ assert.Equal(t, expectedReport, *parsed.Report) + assert.EqualValues(t, []types.ReportItem{}, parsed.ParsedHits) + + expectedInfoReport := []types.InfoItem{ + { + InfoID: "version_info|CLUSTER_VERSION_INFO", + InfoKey: "CLUSTER_VERSION_INFO", + Details: map[string]string{ + "version": "4.9", + "type": "info", + "info_key": "CLUSTER_VERSION_INFO", + }, + }, + } + assert.EqualValues(t, expectedInfoReport, parsed.ParsedInfo) +} + +func TestParseProperMessageNoInfoAttribute(t *testing.T) { + data := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "LastChecked": "` + testdata.LastCheckedAt.Format(time.RFC3339) + `", + "Report": { + "system": { + "metadata": {}, + "hostname": null + }, + "reports": [ + { + "component": "` + string(testdata.Rule2ID) + `", + "key": "` + testdata.ErrorKey2 + `", + "user_vote": 0, + "disabled": ` + fmt.Sprint(testdata.Rule2Disabled) + `, + "details": ` + helpers.ToJSONString(testdata.Rule2ExtraData) + ` + } + ], + "fingerprints": [], + "skips": [] + } + }` + message := sarama.ConsumerMessage{Value: []byte(data)} + + _, err := consumer.ParseMessage(&ocpConsumer, &message) + helpers.FailOnError(t, err) +} + +func TestProcessEmptyMessage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + c := dummyOCPConsumer(mockStorage, true) + + message := sarama.ConsumerMessage{} + // message is empty -> nothing should be written into storage + err := c.HandleMessage(&message) + assert.EqualError(t, err, "unexpected end of JSON input") + + count, err := mockStorage.ReportsCount() + helpers.FailOnError(t, err) + + // no record should be written into database + assert.Equal( + t, + 0, + count, + "process message shouldn't write anything into the DB", + ) +} + +func TestProcessCorrectMessage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + + defer closer() + + c := dummyOCPConsumer(mockStorage, true) + + 
message := sarama.ConsumerMessage{} + message.Value = []byte(messageReportWithRuleHits) + // message is correct -> one record should be written into storage + err := c.HandleMessage(&message) + helpers.FailOnError(t, err) + + count, err := mockStorage.ReportsCount() + helpers.FailOnError(t, err) + + // exactly one record should be written into database + assert.Equal(t, 1, count, "process message should write one record into DB") +} + +func TestProcessingEmptyReportMissingAttributesWithClosedStorage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + + mockConsumer := dummyOCPConsumer(mockStorage, true) + closer() + + err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) + helpers.FailOnError(t, err, "empty report should not be considered an error at HandleMessage level") +} + +func TestProcessingValidMessageEmptyReportWithRequiredAttributesWithClosedStorage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + + mockConsumer := dummyOCPConsumer(mockStorage, true) + closer() + + err := consumerProcessMessage(mockConsumer, testdata.ConsumerMessage) + assert.EqualError(t, err, "sql: database is closed") +} + +func TestProcessingCorrectMessageWithClosedStorage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + + mockConsumer := dummyOCPConsumer(mockStorage, true) + closer() + + err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) + assert.EqualError(t, err, "sql: database is closed") +} + +func TestProcessingMessageWithWrongDateFormatAndEmptyReport(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := dummyOCPConsumer(mockStorage, true) + + err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) + assert.Nil(t, err, "Message with empty report should not be processed") +} + +func TestProcessingMessageWithWrongDateFormatReportNotEmpty(t *testing.T) { + 
mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := dummyOCPConsumer(mockStorage, true) + + messageValue := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "2020.01.23 16:15:59" + }` + err := consumerProcessMessage(mockConsumer, messageValue) + if _, ok := err.(*time.ParseError); err == nil || !ok { + t.Fatal(fmt.Errorf( + "expected time.ParseError error because date format is wrong. Got %+v", err, + )) + } +} + +func TestKafkaConsumerMockOK(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + mockConsumer, closer := ira_helpers.MustGetMockOCPRulesConsumerWithExpectedMessages( + t, + testTopicName, + testOrgAllowlist, + []string{messageReportWithRuleHits}, + ) + + go mockConsumer.Serve() + + // wait for message processing + ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) + + closer() + + assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) + assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) + }, testCaseTimeLimit) +} + +func TestKafkaConsumerMockBadMessage(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + mockConsumer, closer := ira_helpers.MustGetMockOCPRulesConsumerWithExpectedMessages( + t, + testTopicName, + testOrgAllowlist, + []string{"bad message"}, + ) + + go mockConsumer.Serve() + + // wait for message processing + ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) + + closer() + + assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) + assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) + }, testCaseTimeLimit) +} + +func TestKafkaConsumerMockWritingMsgWithEmptyReportToClosedStorage(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + 
mockConsumer, closer := ira_helpers.MustGetMockOCPRulesConsumerWithExpectedMessages( + t, testTopicName, testOrgAllowlist, []string{messageNoReportsNoInfo}, + ) + + err := mockConsumer.KafkaConsumer.Storage.Close() + helpers.FailOnError(t, err) + + go mockConsumer.Serve() + + ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) + + closer() + + // Since the report is present but empty, we stop processing this message without errors + assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) + assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) + }, testCaseTimeLimit) +} + +func TestKafkaConsumerMockWritingMsgWithReportToClosedStorage(t *testing.T) { + helpers.RunTestWithTimeout(t, func(t testing.TB) { + mockConsumer, closer := ira_helpers.MustGetMockOCPRulesConsumerWithExpectedMessages( + t, testTopicName, testOrgAllowlist, []string{messageReportWithRuleHits}, + ) + + err := mockConsumer.KafkaConsumer.Storage.Close() + helpers.FailOnError(t, err) + + go mockConsumer.Serve() + + ira_helpers.WaitForMockConsumerToHaveNConsumedMessages(mockConsumer, 1) + + closer() + + // Since the report is present and not empty, it is processed, and we reach the closed DB error + assert.Equal(t, uint64(0), mockConsumer.KafkaConsumer.GetNumberOfSuccessfullyConsumedMessages()) + assert.Equal(t, uint64(1), mockConsumer.KafkaConsumer.GetNumberOfErrorsConsumingMessages()) + }, testCaseTimeLimit) +} + +func TestKafkaConsumer_ProcessMessage_OrganizationAllowlistDisabled(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := dummyOCPConsumer(mockStorage, false) + + err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) + helpers.FailOnError(t, err) +} + +func TestKafkaConsumer_ProcessMessageWithEmptyReport_OrganizationIsNotAllowed(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) 
+ defer closer() + + brokerCfg := broker.Configuration{ + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", + OrgAllowlist: mapset.NewSetWith(types.OrgID(123)), // in testdata, OrgID = 1 + OrgAllowlistEnabled: true, + } + mockConsumer := createOCPConsumer(brokerCfg, mockStorage) + + err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) + helpers.FailOnError(t, err, "message have empty report and should not be processed") +} + +func TestKafkaConsumer_ProcessMessage_OrganizationIsNotAllowed(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + brokerCfg := broker.Configuration{ + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", + OrgAllowlist: mapset.NewSetWith(types.OrgID(123)), // in testdata, OrgID = 1 + OrgAllowlistEnabled: true, + } + mockConsumer := createOCPConsumer(brokerCfg, mockStorage) + + err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) + assert.EqualError(t, err, organizationIDNotInAllowList) +} + +func TestKafkaConsumer_ProcessMessageWithEmptyReport_OrganizationBadConfigIsNotAllowed(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + brokerCfg := broker.Configuration{ + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", + OrgAllowlist: nil, + OrgAllowlistEnabled: true, + } + mockConsumer := createOCPConsumer(brokerCfg, mockStorage) + + err := consumerProcessMessage(mockConsumer, messageNoReportsNoInfo) + helpers.FailOnError(t, err, "message have empty report and should not be processed") +} + +func TestKafkaConsumer_ProcessMessage_OrganizationBadConfigIsNotAllowed(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + brokerCfg := broker.Configuration{ + Addresses: "localhost:1234", + Topic: "topic", + Group: "group", + OrgAllowlist: nil, + OrgAllowlistEnabled: true, + } + mockConsumer := createOCPConsumer(brokerCfg, 
mockStorage) + + err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) + assert.EqualError(t, err, organizationIDNotInAllowList) +} + +func TestKafkaConsumer_ProcessMessage_MessageFromTheFuture(t *testing.T) { + buf := new(bytes.Buffer) + zerolog_log.Logger = zerolog.New(buf) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := createOCPConsumer(wrongBrokerCfg, mockStorage) + + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "` + time.Now().Add(24*time.Hour).Format(time.RFC3339) + `" + }` + + err := consumerProcessMessage(mockConsumer, message) + helpers.FailOnError(t, err) + assert.Contains(t, buf.String(), "got a message from the future") +} + +func TestKafkaConsumer_ProcessMessage_MoreRecentReportAlreadyExists(t *testing.T) { + zerolog.SetGlobalLevel(zerolog.InfoLevel) + buf := new(bytes.Buffer) + zerolog_log.Logger = zerolog.New(buf) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := createOCPConsumer(wrongBrokerCfg, mockStorage) + + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "` + time.Now().Format(time.RFC3339) + `" + }` + + err := consumerProcessMessage(mockConsumer, message) + helpers.FailOnError(t, err) + + message = `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `" + }` + + err = consumerProcessMessage(mockConsumer, message) + helpers.FailOnError(t, err) + + assert.Contains(t, buf.String(), "Skipping because a more recent report already exists for this cluster") +} + +func 
TestKafkaConsumer_ProcessMessage_MessageWithNoSchemaVersion(t *testing.T) { + buf := new(bytes.Buffer) + zerolog_log.Logger = zerolog.New(buf) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := createOCPConsumer(wrongBrokerCfg, mockStorage) + + err := consumerProcessMessage(mockConsumer, messageReportWithRuleHits) + helpers.FailOnError(t, err) + assert.Contains(t, buf.String(), "\"level\":\"warn\"") + assert.Contains(t, buf.String(), "Received data with unexpected version") +} + +func TestKafkaConsumer_ProcessMessage_MessageWithUnexpectedSchemaVersion(t *testing.T) { + buf := new(bytes.Buffer) + zerolog_log.Logger = zerolog.New(buf) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := createOCPConsumer(wrongBrokerCfg, mockStorage) + + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `", + "Version": ` + fmt.Sprintf("%d", types.SchemaVersion(3)) + ` + }` + + err := consumerProcessMessage(mockConsumer, message) + helpers.FailOnError(t, err) + assert.Contains(t, buf.String(), "\"level\":\"warn\"") + assert.Contains(t, buf.String(), "Received data with unexpected version") +} + +func TestKafkaConsumer_ProcessMessage_MessageWithExpectedSchemaVersion(t *testing.T) { + buf := new(bytes.Buffer) + zerolog_log.Logger = zerolog.New(buf) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + mockConsumer := createOCPConsumer(wrongBrokerCfg, mockStorage) + + message := `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `", + "Version": ` + fmt.Sprintf("%d", types.SchemaVersion(1)) + ` + }` + + err := 
consumerProcessMessage(mockConsumer, message) + helpers.FailOnError(t, err) + + message = `{ + "OrgID": ` + fmt.Sprint(testdata.OrgID) + `, + "ClusterName": "` + string(testdata.ClusterName) + `", + "Report":` + testReport + `, + "LastChecked": "` + time.Now().Add(-24*time.Hour).Format(time.RFC3339) + `", + "Version": ` + fmt.Sprintf("%d", types.SchemaVersion(2)) + ` + }` + + err = consumerProcessMessage(mockConsumer, message) + helpers.FailOnError(t, err) + + assert.NotContains(t, buf.String(), "\"level\":\"warn\"") + assert.NotContains(t, buf.String(), "Received data with unexpected version") +} + +func TestKafkaConsumer_ConsumeClaim(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + kafkaConsumer := createOCPConsumer(broker.Configuration{}, mockStorage) + + mockConsumerGroupSession := &saramahelpers.MockConsumerGroupSession{} + mockConsumerGroupClaim := saramahelpers.NewMockConsumerGroupClaim(nil) + + err := kafkaConsumer.ConsumeClaim(mockConsumerGroupSession, mockConsumerGroupClaim) + helpers.FailOnError(t, err) +} + +func TestKafkaConsumer_ConsumeClaim_DBError(t *testing.T) { + buf := new(bytes.Buffer) + zerolog_log.Logger = zerolog.New(buf) + + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + closer() + + kafkaConsumer := createOCPConsumer(broker.Configuration{}, mockStorage) + + mockConsumerGroupSession := &saramahelpers.MockConsumerGroupSession{} + mockConsumerGroupClaim := saramahelpers.NewMockConsumerGroupClaim(nil) + + err := kafkaConsumer.ConsumeClaim(mockConsumerGroupSession, mockConsumerGroupClaim) + helpers.FailOnError(t, err) + + assert.Contains(t, buf.String(), "starting messages loop") +} + +func TestKafkaConsumer_ConsumeClaim_OKMessage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) + defer closer() + + kafkaConsumer := createOCPConsumer(broker.Configuration{}, mockStorage) + + mockConsumerGroupSession := 
&saramahelpers.MockConsumerGroupSession{} + mockConsumerGroupClaim := saramahelpers.NewMockConsumerGroupClaim([]*sarama.ConsumerMessage{ + saramahelpers.StringToSaramaConsumerMessage(testdata.ConsumerMessage), + }) + + err := kafkaConsumer.ConsumeClaim(mockConsumerGroupSession, mockConsumerGroupClaim) + helpers.FailOnError(t, err) +} diff --git a/consumer/processing.go b/consumer/processing.go index fdfd6f350..c5a0974fe 100644 --- a/consumer/processing.go +++ b/consumer/processing.go @@ -20,8 +20,9 @@ import ( "fmt" "time" + "github.com/RedHatInsights/insights-results-aggregator/storage" + "github.com/Shopify/sarama" - "github.com/google/uuid" "github.com/rs/zerolog/log" "github.com/RedHatInsights/insights-results-aggregator/metrics" @@ -31,24 +32,37 @@ import ( const ( improperIncomeMessageError = "Deserialized report read from message with improper structure " + unexpectedStorageType = "Unexpected storage type" reportAttributeSystem = "system" reportAttributeReports = "reports" reportAttributeInfo = "info" reportAttributeFingerprints = "fingerprints" reportAttributeMetadata = "analysis_metadata" - numberOfExpectedKeysInReport = 4 + numberOfExpectedKeysInReport = 3 // Number of items in expectedKeysInReport ) var ( expectedKeysInReport = []string{ - reportAttributeFingerprints, reportAttributeInfo, reportAttributeReports, reportAttributeSystem, + reportAttributeFingerprints, reportAttributeReports, reportAttributeSystem, } ) -// Report represents report send in a message consumed from any broker +// MessageProcessor offers the interface for processing a received message +type MessageProcessor interface { + deserializeMessage(messageValue []byte) (incomingMessage, error) + parseMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (incomingMessage, error) + processMessage(consumer *KafkaConsumer, msg *sarama.ConsumerMessage) (types.RequestID, incomingMessage, error) + shouldProcess(consumer *KafkaConsumer, consumed *sarama.ConsumerMessage, parsed 
*incomingMessage) error +} + +// Report represents report sent in a message consumed from any broker type Report map[string]*json.RawMessage +// DvoMetrics represents DVO workload recommendations received as part +// of the incoming message +type DvoMetrics map[string]*json.RawMessage + type system struct { Hostname string `json:"hostname"` } @@ -59,13 +73,15 @@ type incomingMessage struct { Account *types.Account `json:"AccountNumber"` ClusterName *types.ClusterName `json:"ClusterName"` Report *Report `json:"Report"` + DvoMetrics *DvoMetrics `json:"Metrics"` // LastChecked is a date in format "2020-01-23T16:15:59.478901889Z" - LastChecked string `json:"LastChecked"` - Version types.SchemaVersion `json:"Version"` - RequestID types.RequestID `json:"RequestId"` - Metadata types.Metadata `json:"Metadata"` - ParsedHits []types.ReportItem - ParsedInfo []types.InfoItem + LastChecked string `json:"LastChecked"` + Version types.SchemaVersion `json:"Version"` + RequestID types.RequestID `json:"RequestId"` + Metadata types.Metadata `json:"Metadata"` + ParsedHits []types.ReportItem + ParsedInfo []types.InfoItem + ParsedWorkloads []types.WorkloadRecommendation } var currentSchemaVersion = types.AllowedVersions{ @@ -142,7 +158,7 @@ func (consumer *KafkaConsumer) HandleMessage(msg *sarama.ConsumerMessage) error metrics.ConsumedMessages.Inc() startTime := time.Now() - requestID, message, err := consumer.processMessage(msg) + requestID, message, err := consumer.MessageProcessor.processMessage(consumer, msg) timeAfterProcessingMessage := time.Now() messageProcessingDuration := timeAfterProcessingMessage.Sub(startTime).Seconds() @@ -161,11 +177,13 @@ func (consumer *KafkaConsumer) HandleMessage(msg *sarama.ConsumerMessage) error log.Error().Err(err).Msg("Error processing message consumed from Kafka") consumer.numberOfErrorsConsumingMessages++ - - if err := consumer.Storage.WriteConsumerError(msg, err); err != nil { - log.Error().Err(err).Msg("Unable to write consumer error to 
storage") + if ocpStorage, ok := consumer.Storage.(storage.OCPRecommendationsStorage); ok { + if err := ocpStorage.WriteConsumerError(msg, err); err != nil { + log.Error().Err(err).Msg("Unable to write consumer error to storage") + } + } else { + logMessageError(consumer, msg, &message, unexpectedStorageType, errors.New("consumer error could not be stored")) } - consumer.sendDeadLetter(msg) consumer.updatePayloadTracker(requestID, time.Now(), message.Organization, message.Account, producer.StatusError) @@ -232,47 +250,6 @@ func checkMessageOrgInAllowList(consumer *KafkaConsumer, message *incomingMessag return true, "" } -func (consumer *KafkaConsumer) writeRecommendations( - msg *sarama.ConsumerMessage, message incomingMessage, reportAsBytes []byte, -) (time.Time, error) { - err := consumer.Storage.WriteRecommendationsForCluster( - *message.Organization, - *message.ClusterName, - types.ClusterReport(reportAsBytes), - types.Timestamp(time.Now().UTC().Format(time.RFC3339)), - ) - if err != nil { - logMessageError(consumer, msg, &message, "Error writing recommendations to database", err) - return time.Time{}, err - } - tStored := time.Now() - logMessageDebug(consumer, msg, &message, "Stored recommendations") - return tStored, nil -} - -func (consumer *KafkaConsumer) writeInfoReport( - msg *sarama.ConsumerMessage, message incomingMessage, infoStoredAtTime time.Time, -) error { - // it is expected that message.ParsedInfo contains at least one item: - // result from special INFO rule containing cluster version that is - // used just in external data pipeline - err := consumer.Storage.WriteReportInfoForCluster( - *message.Organization, - *message.ClusterName, - message.ParsedInfo, - infoStoredAtTime, - ) - if err == types.ErrOldReport { - logMessageInfo(consumer, msg, &message, "Skipping because a more recent info report already exists for this cluster") - return nil - } else if err != nil { - logMessageError(consumer, msg, &message, "Error writing info report to 
database", err) - return err - } - logMessageInfo(consumer, msg, &message, "Stored info report") - return nil -} - func (consumer *KafkaConsumer) logMsgForFurtherAnalysis(msg *sarama.ConsumerMessage) { if consumer.Configuration.DisplayMessageWithWrongStructure { log.Info().Str("unparsed message", string(msg.Value)).Msg("Message for further analysis") @@ -287,15 +264,6 @@ func (consumer *KafkaConsumer) logReportStructureError(err error, msg *sarama.Co } } -func (consumer *KafkaConsumer) shouldProcess(consumed *sarama.ConsumerMessage, parsed *incomingMessage) error { - err := checkReportStructure(*parsed.Report) - if err != nil { - consumer.logReportStructureError(err, consumed) - return err - } - return nil -} - func (consumer *KafkaConsumer) retrieveLastCheckedTime(msg *sarama.ConsumerMessage, parsedMsg *incomingMessage) (time.Time, error) { lastCheckedTime, err := time.Parse(time.RFC3339Nano, parsedMsg.LastChecked) if err != nil { @@ -314,15 +282,36 @@ func (consumer *KafkaConsumer) retrieveLastCheckedTime(msg *sarama.ConsumerMessa return lastCheckedTime, nil } -// processMessage processes an incoming message -func (consumer *KafkaConsumer) processMessage(msg *sarama.ConsumerMessage) (types.RequestID, incomingMessage, error) { - tStart := time.Now() +// organizationAllowed checks whether the given organization is on allow list or not +func organizationAllowed(consumer *KafkaConsumer, orgID types.OrgID) bool { + allowList := consumer.Configuration.OrgAllowlist + if allowList == nil { + return false + } - log.Info().Int(offsetKey, int(msg.Offset)).Str(topicKey, consumer.Configuration.Topic).Str(groupKey, consumer.Configuration.Group).Msg("Consumed") + orgAllowed := allowList.Contains(orgID) + + return orgAllowed +} - message, err := consumer.parseMessage(msg, tStart) +type storeInDBFunction func( + consumer *KafkaConsumer, + msg *sarama.ConsumerMessage, + message incomingMessage) (types.RequestID, incomingMessage, error) + +// commonProcessMessage is used by both 
DVO and OCP message processors as they share +// some steps in common. However, storing the DVO and OCP reports in the DB is done +// differently, so it was needed to introduce a storeInDBFunction type +func commonProcessMessage( + consumer *KafkaConsumer, + msg *sarama.ConsumerMessage, + storeInDBFunction storeInDBFunction, +) (types.RequestID, incomingMessage, error) { + tStart := time.Now() + log.Info().Int(offsetKey, int(msg.Offset)).Str(topicKey, consumer.Configuration.Topic).Str(groupKey, consumer.Configuration.Group).Msg("Consumed") + message, err := consumer.MessageProcessor.parseMessage(consumer, msg) if err != nil { - if err == types.ErrEmptyReport { + if errors.Is(err, types.ErrEmptyReport) { logMessageInfo(consumer, msg, &message, "This message has an empty report and will not be processed further") metrics.SkippedEmptyReports.Inc() return message.RequestID, message, nil @@ -331,6 +320,7 @@ func (consumer *KafkaConsumer) processMessage(msg *sarama.ConsumerMessage) (type } logMessageInfo(consumer, msg, &message, "Read") tRead := time.Now() + logDuration(tStart, tRead, msg.Offset, "read") checkMessageVersion(consumer, &message, msg) @@ -341,224 +331,11 @@ func (consumer *KafkaConsumer) processMessage(msg *sarama.ConsumerMessage) (type } tAllowlisted := time.Now() + logDuration(tRead, tAllowlisted, msg.Offset, "org_filtering") logMessageDebug(consumer, msg, &message, "Marshalled") tMarshalled := time.Now() - - lastCheckedTime, err := consumer.retrieveLastCheckedTime(msg, &message) - if err != nil { - return message.RequestID, message, err - } - tTimeCheck := time.Now() - - // timestamp when the report is about to be written into database - storedAtTime := time.Now() - - reportAsBytes, err := json.Marshal(*message.Report) - if err != nil { - logMessageError(consumer, msg, &message, "Error marshalling report", err) - return message.RequestID, message, err - } - - err = consumer.Storage.WriteReportForCluster( - *message.Organization, - *message.ClusterName, - 
types.ClusterReport(reportAsBytes), - message.ParsedHits, - lastCheckedTime, - message.Metadata.GatheredAt, - storedAtTime, - message.RequestID, - ) - if err == types.ErrOldReport { - logMessageInfo(consumer, msg, &message, "Skipping because a more recent report already exists for this cluster") - return message.RequestID, message, nil - } else if err != nil { - logMessageError(consumer, msg, &message, "Error writing report to database", err) - return message.RequestID, message, err - } - logMessageDebug(consumer, msg, &message, "Stored report") - tStored := time.Now() - - tRecommendationsStored, err := consumer.writeRecommendations(msg, message, reportAsBytes) - if err != nil { - return message.RequestID, message, err - } - - // rule hits has been stored into database - time to log all these great info - logClusterInfo(&message) - - infoStoredAtTime := time.Now() - if err := consumer.writeInfoReport(msg, message, infoStoredAtTime); err != nil { - return message.RequestID, message, err - } - infoStored := time.Now() - - // log durations for every message consumption steps - logDuration(tStart, tRead, msg.Offset, "read") - logDuration(tRead, tAllowlisted, msg.Offset, "org_filtering") logDuration(tAllowlisted, tMarshalled, msg.Offset, "marshalling") - logDuration(tMarshalled, tTimeCheck, msg.Offset, "time_check") - logDuration(tTimeCheck, tStored, msg.Offset, "db_store_report") - logDuration(tStored, tRecommendationsStored, msg.Offset, "db_store_recommendations") - logDuration(infoStoredAtTime, infoStored, msg.Offset, "db_store_info_report") - - // message has been parsed and stored into storage - return message.RequestID, message, nil -} - -// organizationAllowed checks whether the given organization is on allow list or not -func organizationAllowed(consumer *KafkaConsumer, orgID types.OrgID) bool { - allowList := consumer.Configuration.OrgAllowlist - if allowList == nil { - return false - } - - orgAllowed := allowList.Contains(orgID) - - return orgAllowed -} - 
-func verifySystemAttributeIsEmpty(r Report) bool { - var s system - if err := json.Unmarshal(*r[reportAttributeSystem], &s); err != nil { - return false - } - if s.Hostname != "" { - return false - } - return true -} - -// isReportWithEmptyAttributes checks if the report is empty, or if the attributes -// expected in the report, minus the analysis_metadata, are empty. -// If this function returns true, this report will not be processed further as it is -// PROBABLY the result of an archive that was not processed by insights-core. -// see https://github.com/RedHatInsights/insights-results-aggregator/issues/1834 -func isReportWithEmptyAttributes(r Report) bool { - // Create attribute checkers for each attribute - for attr, attrData := range r { - // we don't care about the analysis_metadata attribute - if attr == reportAttributeMetadata { - continue - } - // special handling for the system attribute, as it comes with data when empty - if attr == reportAttributeSystem { - if !verifySystemAttributeIsEmpty(r) { - return false - } - continue - } - // Check if this attribute of the report is empty - checker := JSONAttributeChecker{data: *attrData} - if !checker.IsEmpty() { - return false - } - } - return true -} - -// checkReportStructure tests if the report has correct structure -func checkReportStructure(r Report) error { - // the structure is not well-defined yet, so all we should do is to check if all keys are there - - // 'skips' key is now optional, we should not expect it anymore: - // https://github.com/RedHatInsights/insights-results-aggregator/issues/1206 - keysNotFound := make([]string, 0, numberOfExpectedKeysInReport) - keysFound := 0 - // check if the structure contains all expected keys - for _, expectedKey := range expectedKeysInReport { - _, found := r[expectedKey] - if !found { - keysNotFound = append(keysNotFound, expectedKey) - } else { - keysFound++ - } - } - - if keysFound == numberOfExpectedKeysInReport { - return nil - } - - // empty reports mean 
that this message should not be processed further - isEmpty := len(r) == 0 || isReportWithEmptyAttributes(r) - if isEmpty { - log.Debug().Msg("Empty report or report with only empty attributes. Processing of this message will be skipped.") - return types.ErrEmptyReport - } - - // report is not empty, and some keys have not been found -> malformed - if len(keysNotFound) != 0 { - return fmt.Errorf("improper report structure, missing key(s) with name '%v'", keysNotFound) - } - - return nil -} - -// deserializeMessage tries to parse incoming message and read all required attributes from it -func deserializeMessage(messageValue []byte) (incomingMessage, error) { - var deserialized incomingMessage - - err := json.Unmarshal(messageValue, &deserialized) - if err != nil { - return deserialized, err - } - - if deserialized.Organization == nil { - return deserialized, errors.New("missing required attribute 'OrgID'") - } - if deserialized.ClusterName == nil { - return deserialized, errors.New("missing required attribute 'ClusterName'") - } - if deserialized.Report == nil { - return deserialized, errors.New("missing required attribute 'Report'") - } - - _, err = uuid.Parse(string(*deserialized.ClusterName)) - - if err != nil { - return deserialized, errors.New("cluster name is not a UUID") - } - return deserialized, nil -} - -// parseReportContent verifies the content of the Report structure and parses it into -// the relevant parts of the incomingMessage structure -func parseReportContent(message *incomingMessage) error { - err := json.Unmarshal(*((*message.Report)[reportAttributeReports]), &message.ParsedHits) - if err != nil { - return err - } - - // it is expected that message.ParsedInfo contains at least one item: - // result from special INFO rule containing cluster version that is - // used just in external data pipeline - err = json.Unmarshal(*((*message.Report)[reportAttributeInfo]), &message.ParsedInfo) - if err != nil { - return err - } - return nil -} - -func 
(consumer *KafkaConsumer) parseMessage(msg *sarama.ConsumerMessage, tStart time.Time) (incomingMessage, error) { - message, err := deserializeMessage(msg.Value) - if err != nil { - consumer.logMsgForFurtherAnalysis(msg) - logUnparsedMessageError(consumer, msg, "Error parsing message from Kafka", err) - return message, err - } - - consumer.updatePayloadTracker(message.RequestID, tStart, message.Organization, message.Account, producer.StatusReceived) - - if err := consumer.shouldProcess(msg, &message); err != nil { - return message, err - } - - err = parseReportContent(&message) - if err != nil { - consumer.logReportStructureError(err, msg) - return message, err - } - return message, nil + return storeInDBFunction(consumer, msg, message) } diff --git a/dashboards/grafana-dashboard-ccx-cache-writer.yaml b/dashboards/grafana-dashboard-ccx-cache-writer.yaml deleted file mode 100644 index 418785177..000000000 --- a/dashboards/grafana-dashboard-ccx-cache-writer.yaml +++ /dev/null @@ -1,1145 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: grafana-dashboard-ccx-cache-writer - labels: - grafana_dashboard: "true" - annotations: - grafana-folder: /grafana-dashboard-definitions/Insights -data: - general.json: |- - { - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 143497, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "uid": "$datasource" - }, - "fieldConfig": { - "defaults": { - "decimals": 0, - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - 
{ - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 3, - "x": 0, - "y": 0 - }, - "id": 16, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.3.8", - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "expr": "sum (up{service=\"ccx-cache-writer-prometheus-exporter\", namespace=\"$namespace\"})", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "instant": false, - "intervalFactor": 1, - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "title": "Number of pods", - "transparent": true, - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "$datasource" - }, - "fieldConfig": { - "defaults": { - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 6, - "x": 3, - "y": 0 - }, - "id": 18, - "options": { - "colorMode": "background", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.3.8", - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "editorMode": "code", - "expr": "sum(increase(consuming_errors{namespace=\"$namespace\", 
service=\"ccx-cache-writer-prometheus-exporter\"}[1w]))", - "interval": "", - "legendFormat": "", - "range": true, - "refId": "A" - } - ], - "title": "Consuming Errors (Past Week)", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "$datasource" - }, - "decimals": 2, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 0, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 5, - "w": 24, - "x": 0, - "y": 5 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "expr": "sum (container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"ccx-cache-writer-.*\"}) by (pod)", - "format": "time_series", - "interval": "10s", - "intervalFactor": 1, - "legendFormat": "{{ pod }}", - "metric": "container_memory_usage:sort_desc", - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Pods memory usage", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - 
"bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "$datasource" - }, - "decimals": 3, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 0, - "fillGradient": 0, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 10 - }, - "height": "", - "hiddenSeries": false, - "id": 12, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": false, - "rightSide": false, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "expr": "sum (rate (container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=~\"ccx-cache-writer-.*\"}[1m])) by (pod)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ pod }}", - "metric": "container_cpu", - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Pods CPU usage (1m avg)", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 2, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": "cores", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "$datasource" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - 
"fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 10 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "editorMode": "code", - "expr": "sum(kafka_consumergroup_group_lag{topic=~\".*ccx.ocp.results\", group=\"ccx_cache_writer_app\"}) by (topic, group)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "topic: {{topic}} group: {{group}}", - "range": true, - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Kafka lag", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "min": "0", - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "$datasource" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - 
"percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "expr": "sum(increase(consumed_messages{service=\"ccx-cache-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "consumed messages", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - }, - { - "datasource": { - "uid": "$datasource" - }, - "expr": "sum(increase(written_reports{service=\"ccx-cache-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 1, - "legendFormat": "written reports", - "orderByTime": "ASC", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Consumed Messages & Written Reports", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, 
- "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "$datasource" - }, - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "editorMode": "code", - "expr": "increase(consuming_errors{namespace=\"$namespace\", service=\"ccx-cache-writer-prometheus-exporter\"}[1m])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "instant": false, - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "errors", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Consuming Errors", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - 
"uid": "$datasource" - }, - "description": "Should normally be 0, because we use it only for testing", - "fieldConfig": { - "defaults": { - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 27 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "expr": "increase(produced_messages{service=\"ccx-cache-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m])", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "1m", - "intervalFactor": 1, - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Produced Messages", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "min": "0", - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "uid": "$datasource" - }, - "fieldConfig": { - "defaults": { - 
"links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 27 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "9.3.8", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "uid": "$datasource" - }, - "expr": "sum(\n increase(successful_messages_processing_time_sum{service=\"ccx-cache-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m])\n) by (service)\n/\nsum(\n increase(successful_messages_processing_time_count{service=\"ccx-cache-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m])\n) by (service)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Average Successful Message Processing Time", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - } - ], - "refresh": false, - "schemaVersion": 37, - "style": "dark", - 
"tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "crcp01ue1-prometheus", - "value": "crcp01ue1-prometheus" - }, - "hide": 0, - "includeAll": false, - "label": "datasource", - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "/.*crc.*/", - "skipUrlSync": false, - "type": "datasource" - }, - { - "current": { - "selected": true, - "text": "ccx-data-pipeline-prod", - "value": "ccx-data-pipeline-prod" - }, - "hide": 0, - "includeAll": false, - "label": "namespace", - "multi": false, - "name": "namespace", - "options": [ - { - "selected": false, - "text": "ccx-data-pipeline-stage", - "value": "ccx-data-pipeline-stage" - }, - { - "selected": true, - "text": "ccx-data-pipeline-prod", - "value": "ccx-data-pipeline-prod" - } - ], - "query": "ccx-data-pipeline-stage,ccx-data-pipeline-prod", - "skipUrlSync": false, - "type": "custom" - } - ] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "CCX Cache Writer", - "uid": "ccx-cache-writer", - "version": 2, - "weekStart": "" - } diff --git a/dashboards/grafana-dashboard-insights-ccx-insights-results-db-writer.configmap.yaml b/dashboards/grafana-dashboard-insights-ccx-insights-results-db-writer.configmap.yaml index bf7971c3b..6bcd4ce93 100644 --- a/dashboards/grafana-dashboard-insights-ccx-insights-results-db-writer.configmap.yaml +++ b/dashboards/grafana-dashboard-insights-ccx-insights-results-db-writer.configmap.yaml @@ -13,39 +13,50 @@ data: "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", 
+ "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] }, + "description": "", "editable": true, - "gnetId": null, + "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 70, - "iteration": 1610545066390, "links": [], + "liveNow": false, "panels": [ { - "cacheTimeout": null, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "decimals": 0, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], - "nullValueMode": "connected", "thresholds": { "mode": "absolute", "steps": [ @@ -64,13 +75,12 @@ data: "overrides": [] }, "gridPos": { - "h": 5, - "w": 3, + "h": 6, + "w": 6, "x": 0, "y": 0 }, "id": 16, - "interval": null, "links": [], "maxDataPoints": 100, "options": { @@ -87,10 +97,14 @@ data: }, "textMode": "auto" }, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "targets": [ { - "expr": "sum (up{service=\"ccx-insights-results-db-writer-prometheus-exporter\", namespace=\"$namespace\"})", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum (up{service=~\"$services\", namespace=\"$namespace\"}) by (container)", "format": "time_series", "groupBy": [ { @@ -108,6 +122,7 @@ data: ], "instant": false, "intervalFactor": 1, + "legendFormat": "{{container}}", "orderByTime": "ASC", "policy": "default", "refId": "A", @@ -134,112 +149,175 @@ data: "type": "stat" }, { - "datasource": "$datasource", + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } + "links": 
[] }, "overrides": [] }, + "fill": 1, + "fillGradient": 0, "gridPos": { - "h": 5, - "w": 6, - "x": 3, + "h": 6, + "w": 9, + "x": 6, "y": 0 }, - "id": 18, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", "options": { - "colorMode": "background", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" + "alertThreshold": true }, - "pluginVersion": "7.2.1", + "percentage": false, + "pluginVersion": "9.3.8", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "sum(increase(consuming_errors{namespace=\"$namespace\"}[1w]))", - "interval": "", - "legendFormat": "", - "refId": "A" + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(\n increase(successful_messages_processing_time_sum{service=~\"$services\",namespace=\"$namespace\"}[1m])\n) by (container)\n/\nsum(\n increase(successful_messages_processing_time_count{service=~\"$services\",namespace=\"$namespace\"}[1m])\n) by (container)", + "format": "time_series", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "interval": "1m", + "intervalFactor": 1, + "legendFormat": "{{container}}", + "orderByTime": "ASC", + "policy": "default", + "range": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] } ], - "timeFrom": null, - "timeShift": null, - "title": "Consuming Errors (Past Week)", - "type": "stat" + "thresholds": 
[], + "timeRegions": [], + "title": "Average Successful Message Processing Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": 2, - "editable": true, - "error": false, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] }, - "fill": 0, + "fill": 1, "fillGradient": 0, - "grid": {}, "gridPos": { - "h": 5, - "w": 24, - "x": 0, - "y": 5 + "h": 6, + "w": 9, + "x": 15, + "y": 0 }, "hiddenSeries": false, - "id": 14, + "id": 4, "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, + "avg": false, + "current": false, + "max": false, "min": false, - "rightSide": true, "show": true, - "sideWidth": null, - "sort": "current", - "sortDesc": true, "total": false, - "values": true + "values": false }, "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "connected", + "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -249,56 +327,79 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum (container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"ccx-insights-results-db-writer-.*\"}) by (pod)", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(increase(consuming_errors{namespace=\"$namespace\", service=~\"$services\"}[1m])) by (container)", "format": "time_series", - "interval": "10s", + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": 
"time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "instant": false, + "interval": "1m", "intervalFactor": 1, - "legendFormat": "{{ pod }}", - "metric": "container_memory_usage:sort_desc", + "legendFormat": "{{container}}", + "orderByTime": "ASC", + "policy": "default", "refId": "A", - "step": 10 + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Pods memory usage", + "title": "Consuming Errors", "tooltip": { - "msResolution": false, "shared": true, - "sort": 2, - "value_type": "cumulative" + "sort": 0, + "value_type": "individual" }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "bytes", - "label": null, + "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, - "show": false + "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -306,13 +407,15 @@ data: "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "decimals": 3, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "decimals": 2, "editable": true, "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -321,14 +424,13 @@ data: "fillGradient": 0, "grid": {}, "gridPos": { - "h": 8, - "w": 12, + "h": 6, + "w": 8, "x": 0, - "y": 10 + "y": 6 }, - "height": "", "hiddenSeries": false, - "id": 12, + "id": 14, "legend": { "alignAsTable": true, "avg": true, @@ -336,8 +438,7 @@ data: "max": true, "min": false, "rightSide": false, - "show": true, - "sideWidth": null, + "show": false, "sort": "current", "sortDesc": true, "total": false, @@ -351,7 +452,7 @@ data: 
"alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -361,56 +462,50 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum (rate (container_cpu_usage_seconds_total{namespace=\"$namespace\",container=\"ccx-insights-results-db-writer\"}[1m])) by (pod)", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum (container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$services\"}) by (pod)", "format": "time_series", - "interval": "", + "interval": "10s", "intervalFactor": 1, "legendFormat": "{{ pod }}", - "metric": "container_cpu", + "metric": "container_memory_usage:sort_desc", + "range": true, "refId": "A", "step": 10 } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Pods CPU usage (1m avg)", + "title": "Pods memory usage", "tooltip": { - "msResolution": true, + "msResolution": false, "shared": true, "sort": 2, "value_type": "cumulative" }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "none", - "label": "cores", + "format": "bytes", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -418,42 +513,53 @@ data: "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "decimals": 3, + "editable": true, + "error": false, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] }, - "fill": 1, + "fill": 0, "fillGradient": 0, + "grid": {}, "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 10 + "h": 6, + "w": 8, + "x": 8, + "y": 6 }, + "height": "", "hiddenSeries": false, - "id": 10, + "id": 
12, "legend": { - "avg": false, - "current": false, - "max": false, + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, "min": false, - "show": true, + "rightSide": false, + "show": false, + "sort": "current", + "sortDesc": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "options": { "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -463,52 +569,51 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(kafka_consumergroup_group_lag{topic=~\".*ccx.ocp.results\", group=\"ccx_data_pipeline_app\"}) by (topic, group)", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum (rate (container_cpu_usage_seconds_total{namespace=\"$namespace\",pod=~\"$services\"}[1m])) by (pod)", "format": "time_series", + "interval": "", "intervalFactor": 1, - "legendFormat": "topic: {{topic}} group: {{group}}", - "refId": "A" + "legendFormat": "{{ pod }}", + "metric": "container_cpu", + "range": true, + "refId": "A", + "step": 10 } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Kafka lag", + "title": "Pods CPU usage (1m avg)", "tooltip": { + "msResolution": true, "shared": true, - "sort": 0, - "value_type": "individual" + "sort": 2, + "value_type": "cumulative" }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { - "format": "short", - "label": null, + "format": "none", + "label": "cores", "logBase": 1, - "max": null, - "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -516,10 +621,12 @@ data: "bars": false, "dashLength": 10, "dashes": false, - 
"datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -527,22 +634,21 @@ data: "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 18 + "h": 6, + "w": 8, + "x": 16, + "y": 6 }, "hiddenSeries": false, - "id": 2, + "id": 10, "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": true, + "avg": false, + "current": false, + "max": false, + "min": false, "show": true, - "total": true, - "values": true + "total": false, + "values": false }, "lines": true, "linewidth": 1, @@ -552,7 +658,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -562,91 +668,21 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(increase(consumed_messages{service=\"ccx-insights-results-db-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "consumed messages", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - }, - { - "expr": "sum(increase(written_reports{service=\"ccx-insights-results-db-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m]))", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(kafka_consumergroup_group_lag{group=~\"$consumer_groups\"}) by (topic, group)", "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": 
"fill" - } - ], "intervalFactor": 1, - "legendFormat": "written reports", - "orderByTime": "ASC", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] + "legendFormat": "{{group}}", + "range": true, + "refId": "A" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Consumed Messages & Written Reports", + "title": "Kafka lag", "tooltip": { "shared": true, "sort": 0, @@ -654,33 +690,25 @@ data: }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, + "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, - "show": true + "show": false } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -688,10 +716,12 @@ data: "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -699,21 +729,22 @@ data: "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 18 + "h": 8, + "w": 8, + "x": 0, + "y": 12 }, "hiddenSeries": false, - "id": 4, + "id": 2, "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": true, + "values": true }, "lines": true, "linewidth": 1, @@ -723,7 +754,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -733,7 +764,11 @@ data: 
"steppedLine": false, "targets": [ { - "expr": "increase(consuming_errors{namespace=\"$namespace\"}[1m])", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(increase(consumed_messages{service=~\"$services\",namespace=\"$namespace\"}[1m])) by (container)", "format": "time_series", "groupBy": [ { @@ -750,9 +785,9 @@ data: } ], "instant": false, - "interval": "1m", + "interval": "", "intervalFactor": 1, - "legendFormat": "errors", + "legendFormat": "{{container}}", "orderByTime": "ASC", "policy": "default", "refId": "A", @@ -775,10 +810,8 @@ data: } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Consuming Errors", + "title": "Consumed Messages", "tooltip": { "shared": true, "sort": 0, @@ -786,33 +819,24 @@ data: }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -820,11 +844,12 @@ data: "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "description": "Should normally be 0, because we use it only for testing", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -832,21 +857,22 @@ data: "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 27 + "h": 8, + "w": 8, + "x": 8, + "y": 12 }, "hiddenSeries": false, - "id": 8, + "id": 20, "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": true, + "values": 
true }, "lines": true, "linewidth": 1, @@ -856,7 +882,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -866,7 +892,11 @@ data: "steppedLine": false, "targets": [ { - "expr": "increase(produced_messages{service=\"ccx-insights-results-db-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m])", + "datasource": { + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(increase(written_reports{service=~\"$services\",namespace=\"$namespace\"}[1m])) by (container)", "format": "time_series", "groupBy": [ { @@ -882,11 +912,12 @@ data: "type": "fill" } ], - "interval": "1m", "intervalFactor": 1, + "legendFormat": "{{container}}", "orderByTime": "ASC", "policy": "default", - "refId": "A", + "range": true, + "refId": "B", "resultFormat": "time_series", "select": [ [ @@ -906,10 +937,8 @@ data: } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Produced Messages", + "title": "Written Reports", "tooltip": { "shared": true, "sort": 0, @@ -917,33 +946,24 @@ data: }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": "0", "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, - "show": false + "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } }, { @@ -951,10 +971,12 @@ data: "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -962,21 +984,22 @@ data: "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 27 + "h": 8, + "w": 8, + "x": 16, + "y": 12 }, "hiddenSeries": false, - 
"id": 6, + "id": 21, "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": true, + "values": true }, "lines": true, "linewidth": 1, @@ -986,7 +1009,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.2.1", + "pluginVersion": "9.3.8", "pointradius": 5, "points": false, "renderer": "flot", @@ -996,51 +1019,21 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(\n increase(successful_messages_processing_time_sum{service=\"ccx-insights-results-db-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m])\n) by (service)\n/\nsum(\n increase(successful_messages_processing_time_count{service=\"ccx-insights-results-db-writer-prometheus-exporter\",namespace=\"$namespace\"}[1m])\n) by (service)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "sum(increase(produced_messages{service=~\"$services\",namespace=\"$namespace\"}[1m])) by (container)", + "hide": false, + "legendFormat": "{{container}}", + "range": true, + "refId": "C" } ], "thresholds": [], - "timeFrom": null, "timeRegions": [], - "timeShift": null, - "title": "Average Successful Message Processing Time", + "title": "Produced Messages", "tooltip": { "shared": true, "sort": 0, @@ -1048,38 +1041,29 @@ data: }, "type": "graph", "xaxis": { - "buckets": null, "mode": "time", - "name": 
null, "show": true, "values": [] }, "yaxes": [ { - "format": "s", - "label": null, + "format": "short", "logBase": 1, - "max": null, - "min": null, "show": true }, { "format": "short", - "label": null, "logBase": 1, - "max": null, - "min": null, - "show": false + "show": true } ], "yaxis": { - "align": false, - "alignLevel": null + "align": false } } ], "refresh": false, - "schemaVersion": 26, + "schemaVersion": 37, "style": "dark", "tags": [], "templating": { @@ -1103,7 +1087,6 @@ data: "type": "datasource" }, { - "allValue": null, "current": { "selected": true, "text": "ccx-data-pipeline-prod", @@ -1129,6 +1112,48 @@ data: "query": "ccx-data-pipeline-stage,ccx-data-pipeline-prod", "skipUrlSync": false, "type": "custom" + }, + { + "current": { + "selected": false, + "text": ".*db-writer-.*|.*cache-writer-.*|.*dvo-writer-.*", + "value": ".*db-writer-.*|.*cache-writer-.*|.*dvo-writer-.*" + }, + "description": "The RegEx to match the services to monitor", + "hide": 0, + "label": "services", + "name": "services", + "options": [ + { + "selected": true, + "text": ".*db-writer-.*|.*cache-writer-.*|.*dvo-writer-.*", + "value": ".*db-writer-.*|.*cache-writer-.*|.*dvo-writer-.*" + } + ], + "query": ".*db-writer-.*|.*cache-writer-.*|.*dvo-writer-.*", + "skipUrlSync": false, + "type": "textbox" + }, + { + "current": { + "selected": false, + "text": "ccx_data_pipeline_app|ccx_cache_writer_app|dvo_writer_app", + "value": "ccx_data_pipeline_app|ccx_cache_writer_app|dvo_writer_app" + }, + "description": "The RegEx to match the consumer groups to monitor", + "hide": 0, + "label": "consumer_groups", + "name": "consumer_groups", + "options": [ + { + "selected": true, + "text": "ccx_data_pipeline_app|ccx_cache_writer_app|dvo_writer_app", + "value": "ccx_data_pipeline_app|ccx_cache_writer_app|dvo_writer_app" + } + ], + "query": "ccx_data_pipeline_app|ccx_cache_writer_app|dvo_writer_app", + "skipUrlSync": false, + "type": "textbox" } ] }, @@ -1162,7 +1187,8 @@ data: ] }, "timezone": 
"", - "title": "CCX Insights Results DB Writer", + "title": "CCX Writers", "uid": "C4vK5h2Wk", - "version": 1 + "version": 1, + "weekStart": "" } diff --git a/deploy/README.md b/deploy/README.md index fd7c99001..6c4148ad0 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -1,6 +1,6 @@ -# Deployment +# Deploying in ephemeral for troubleshooting and local testing -## Testing the local version of the cache-writer in ephemeral +In order to reserve a namespace, follow these steps: 1. Install `bonfire` ``` @@ -18,6 +18,15 @@ oc login --token=${TOKEN} --server=https://api.c-rh-c-eph.8p0c.p1.openshiftapps. NAMESPACE=$(bonfire namespace reserve) ``` +## Testing the local version of the dvo-writer + +4. Deploy the dvo-writer +``` +bonfire deploy -c deploy/test-dvo-writer.yaml -n $NAMESPACE --component dvo-writer ccx-data-pipeline +``` + +## Testing the local version of the cache-writer + 4. Deploy the cache-writer and Redis workloads ``` bonfire deploy -c deploy/test-cache-writer.yaml -n $NAMESPACE ccx-data-pipeline diff --git a/deploy/cache-writer.yaml b/deploy/cache-writer.yaml index edae65528..0d30663f6 100644 --- a/deploy/cache-writer.yaml +++ b/deploy/cache-writer.yaml @@ -41,7 +41,7 @@ objects: enabled: true podSpec: env: - - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESS + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES value: "${KAFKA_BOOTSTRAP_HOST}:${KAFKA_BOOTSTRAP_PORT}" - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__TIMEOUT value: "${KAFKA_TIMEOUT}" @@ -77,7 +77,9 @@ objects: value: "/data/org_whitelist.csv" - name: INSIGHTS_RESULTS_AGGREGATOR__METRICS__ENABLED value: "true" - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__TYPE + - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE + value: "ocp_recommendations" + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE value: redis - name: INSIGHTS_RESULTS_AGGREGATOR__REDIS__DATABASE value: "${REDIS_DATABASE}" @@ -634,7 +636,7 @@ parameters: value: "true" required: true - name: 
CW_LOG_STREAM - value: $HOSTNAME + value: "cache-writer" - name: CREATE_STREAM_IF_NOT_EXISTS value: "true" - name: CACHE_WRITER_CPU_LIMIT diff --git a/deploy/clowdapp.yaml b/deploy/clowdapp.yaml index a255c2e7f..997a745df 100644 --- a/deploy/clowdapp.yaml +++ b/deploy/clowdapp.yaml @@ -57,17 +57,19 @@ objects: - name: execute-migrations image: ${IMAGE}:${IMAGE_TAG} env: + - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE + value: "ocp_recommendations" - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED value: "false" - name: INSIGHTS_RESULTS_AGGREGATOR__METRICS__ENABLED value: "false" - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER value: postgres - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PARAMS + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PARAMS value: ${PG_PARAMS} - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__LOG_SQL_QUERIES + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__LOG_SQL_QUERIES value: "true" - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__TYPE + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE value: "sql" - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOGGING_TO_CLOUD_WATCH_ENABLED value: ${CLOUDWATCH_ENABLED} @@ -113,7 +115,9 @@ objects: - migration - latest env: - - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESS + - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE + value: "ocp_recommendations" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES value: "${KAFKA_BOOTSTRAP_HOST}:${KAFKA_BOOTSTRAP_PORT}" - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__TIMEOUT value: "${KAFKA_TIMEOUT}" @@ -151,11 +155,11 @@ objects: value: "/data/org_whitelist.csv" - name: INSIGHTS_RESULTS_AGGREGATOR__METRICS__ENABLED value: "true" - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER value: postgres - - 
name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PARAMS + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PARAMS value: ${PG_PARAMS} - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__TYPE + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE value: "sql" - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOG_LEVEL value: ${LOG_LEVEL} @@ -252,11 +256,17 @@ objects: value: "255" - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__ORG_OVERVIEW_LIMIT_HOURS value: "3" - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER value: postgres - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PARAMS + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PARAMS value: ${PG_PARAMS} - - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE__TYPE + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE + value: "sql" + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__DB_DRIVER + value: postgres + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PARAMS + value: ${PG_PARAMS} + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__TYPE value: "sql" - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOG_LEVEL value: ${LOG_LEVEL} diff --git a/deploy/dvo-writer.yaml b/deploy/dvo-writer.yaml new file mode 100644 index 000000000..fae8f1f53 --- /dev/null +++ b/deploy/dvo-writer.yaml @@ -0,0 +1,354 @@ +# Copyright 2023 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: v1 +kind: Template +metadata: + name: dvo-writer +objects: + +- kind: HorizontalPodAutoscaler + apiVersion: autoscaling/v1 + metadata: + labels: + app: ccx-data-pipeline + name: dvo-writer + spec: + minReplicas: ${{MIN_REPLICAS}} + maxReplicas: ${{MAX_REPLICAS}} + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: dvo-writer + targetCPUUtilizationPercentage: 80 + +- apiVersion: cloud.redhat.com/v1alpha1 + kind: ClowdApp + metadata: + name: dvo-writer + spec: + envName: ${ENV_NAME} + testing: + iqePlugin: ccx + deployments: + - name: instance + minReplicas: ${{DVO_WRITER_REPLICAS}} + webServices: + public: + enabled: false + private: + enabled: false + metrics: + enabled: true + podSpec: + initContainers: + - name: execute-migrations + image: ${IMAGE}:${IMAGE_TAG} + env: + - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE + value: "dvo_recommendations" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED + value: "false" + - name: INSIGHTS_RESULTS_AGGREGATOR__METRICS__ENABLED + value: "false" + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__DB_DRIVER + value: postgres + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PARAMS + value: ${PG_PARAMS} + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__LOG_SQL_QUERIES + value: "true" + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__TYPE + value: "sql" + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOGGING_TO_CLOUD_WATCH_ENABLED + value: ${CLOUDWATCH_ENABLED} + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOGGING_TO_SENTRY_ENABLED + value: ${SENTRY_ENABLED} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__DEBUG + value: ${CLOUDWATCH_DEBUG} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__STREAM_NAME + value: ${LOG_STREAM} + - name: 
INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__CREATE_STREAM_IF_NOT_EXISTS + value: ${CREATE_STREAM_IF_NOT_EXISTS} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__AWS_REGION + valueFrom: + secretKeyRef: + name: cloudwatch + key: aws_region + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOG_GROUP + valueFrom: + secretKeyRef: + name: cloudwatch + key: log_group_name + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__AWS_ACCESS_ID + valueFrom: + secretKeyRef: + name: cloudwatch + key: aws_access_key_id + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__AWS_SECRET_KEY + valueFrom: + secretKeyRef: + name: cloudwatch + key: aws_secret_access_key + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__SENTRY__DSN + valueFrom: + secretKeyRef: + key: dsn + name: dvo-writer-dsn + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__SENTRY__ENVIRONMENT + value: ${ENV_NAME} + resources: + limits: + cpu: ${DVO_WRITER_CPU_LIMIT} + memory: ${DVO_WRITER_MEMORY_LIMIT} + requests: + cpu: ${DVO_WRITER_CPU_REQUEST} + memory: ${DVO_WRITER_MEMORY_REQUEST} + command: + - ./insights-results-aggregator + - migration + - latest + env: + - name: INSIGHTS_RESULTS_AGGREGATOR__STORAGE_BACKEND__USE + value: "dvo_recommendations" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES + value: "${KAFKA_BOOTSTRAP_HOST}:${KAFKA_BOOTSTRAP_PORT}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__TIMEOUT + value: "${KAFKA_TIMEOUT}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__TOPIC + value: "${INCOMING_TOPIC}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__DEAD_LETTER_QUEUE_TOPIC + value: "${DEAD_LETTER_QUEUE_TOPIC}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__PAYLOAD_TRACKER_TOPIC + value: "${PAYLOAD_TRACKER_TOPIC}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__SERVICE_NAME + value: "${DVO_WRITER_SERVICE_NAME}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__GROUP + value: "${GROUP_ID}" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLED + value: 
"true" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__ENABLE_ORG_WHITELIST + value: "false" + - name: INSIGHTS_RESULTS_AGGREGATOR__BROKER__DISPLAY_MESSAGE_WITH_WRONG_STRUCTURE + value: "true" + - name: INSIGHTS_RESULTS_AGGREGATOR__PROCESSING__ORG_WHITELIST_FILE + value: "/data/org_whitelist.csv" + - name: INSIGHTS_RESULTS_AGGREGATOR__METRICS__ENABLED + value: "true" + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__DB_DRIVER + value: postgres + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__PG_PARAMS + value: ${PG_PARAMS} + - name: INSIGHTS_RESULTS_AGGREGATOR__DVO_RECOMMENDATIONS_STORAGE__TYPE + value: "sql" + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER + value: postgres + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PARAMS + value: ${PG_PARAMS} + - name: INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__TYPE + value: "sql" + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOG_LEVEL + value: ${LOG_LEVEL} + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOGGING_TO_CLOUD_WATCH_ENABLED + value: ${CLOUDWATCH_ENABLED} + - name: INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOGGING_TO_SENTRY_ENABLED + value: ${SENTRY_ENABLED} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__DEBUG + value: ${CLOUDWATCH_DEBUG} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__STREAM_NAME + value: ${LOG_STREAM} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__CREATE_STREAM_IF_NOT_EXISTS + value: ${CREATE_STREAM_IF_NOT_EXISTS} + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__AWS_REGION + valueFrom: + secretKeyRef: + name: cloudwatch + key: aws_region + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__LOG_GROUP + valueFrom: + secretKeyRef: + name: cloudwatch + key: log_group_name + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__AWS_ACCESS_ID + valueFrom: + secretKeyRef: + name: cloudwatch + key: aws_access_key_id + optional: true + - name: 
INSIGHTS_RESULTS_AGGREGATOR__CLOUDWATCH__AWS_SECRET_KEY + valueFrom: + secretKeyRef: + name: cloudwatch + key: aws_secret_access_key + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__SENTRY__DSN + valueFrom: + secretKeyRef: + key: dsn + name: dvo-writer-dsn + optional: true + - name: INSIGHTS_RESULTS_AGGREGATOR__SENTRY__ENVIRONMENT + value: ${ENV_NAME} + - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__ADDRESS + value: ":9000" + - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__API_PREFIX + value: "${DVO_WRITER_API_PREFIX}" + - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__API_SPEC_FILE + value: "/openapi/openapi.json" + - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__DEBUG + value: "true" + - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__AUTH + value: "false" + - name: INSIGHTS_RESULTS_AGGREGATOR__SERVER__AUTH_TYPE + value: "xrh" + image: ${IMAGE}:${IMAGE_TAG} + livenessProbe: + failureThreshold: 10 + httpGet: + path: "${DVO_WRITER_API_PREFIX}metrics" + port: 9000 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 60 + readinessProbe: + failureThreshold: 10 + httpGet: + path: "${DVO_WRITER_API_PREFIX}metrics" + port: 9000 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 60 + resources: + limits: + cpu: ${DVO_WRITER_CPU_LIMIT} + memory: ${DVO_WRITER_MEMORY_LIMIT} + requests: + cpu: ${DVO_WRITER_CPU_REQUEST} + memory: ${DVO_WRITER_MEMORY_REQUEST} + database: + sharedDbAppName: ccx-insights-results + dependencies: + - ccx-insights-results + kafkaTopics: + - replicas: 3 + partitions: 1 + topicName: ${INCOMING_TOPIC} + - replicas: 3 + partitions: 1 + topicName: ${PAYLOAD_TRACKER_TOPIC} + - replicas: 3 + partitions: 1 + topicName: ${DEAD_LETTER_QUEUE_TOPIC} +- kind: Service + apiVersion: v1 + metadata: + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "9000" + prometheus.io/scheme: http + prometheus.io/scrape: "true" + name: dvo-writer-prometheus-exporter + 
labels: + app: dvo-writer + spec: + ports: + - name: web + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: dvo-writer + pod: dvo-writer-instance + +parameters: +- description: Env Name + name: ENV_NAME + required: true +- description: Image name + name: IMAGE + value: quay.io/cloudservices/insights-results-aggregator +- description: Image tag + name: IMAGE_TAG + required: true +- description: Minimum number of pods to use when autoscaling is enabled + name: MIN_REPLICAS + value: '2' +- description: Maximum number of pods to use when autoscaling is enabled + name: MAX_REPLICAS + value: '6' +- name: PG_PARAMS + value: sslmode=require +- name: KAFKA_BOOTSTRAP_HOST + required: true + value: mq-kafka +- name: KAFKA_BOOTSTRAP_PORT + required: true + value: "29092" +- name: KAFKA_TIMEOUT + value: 300s +- name: INCOMING_TOPIC + value: ccx.dvo.results + required: true +- name: DEAD_LETTER_QUEUE_TOPIC + value: ccx.dvowriter.dead.letter.queue + required: true +- name: PAYLOAD_TRACKER_TOPIC + value: platform.payload-status + required: true +- name: DVO_WRITER_SERVICE_NAME + value: dvo-writer + required: true +- name: GROUP_ID + value: dvo_writer_app + required: true +- name: DVO_WRITER_API_PREFIX + required: true + value: / +- name: LOG_STREAM + value: dvo-writer +- name: DVO_WRITER_REPLICAS + description: The number of replicas to use for the dvo-writer deployment + value: "1" +- name: DVO_WRITER_CPU_LIMIT + value: 200m +- name: DVO_WRITER_MEMORY_LIMIT + value: 400Mi +- name: DVO_WRITER_CPU_REQUEST + value: 100m +- name: DVO_WRITER_MEMORY_REQUEST + value: 200Mi +- name: CLOUDWATCH_DEBUG + value: "false" + required: true +- name: CLOUDWATCH_ENABLED + value: "true" + required: true +- name: SENTRY_ENABLED + value: "true" + required: true +- name: CREATE_STREAM_IF_NOT_EXISTS + value: "true" +- name: LOG_LEVEL + value: "INFO" diff --git a/deploy/test-dvo-writer.yaml b/deploy/test-dvo-writer.yaml new file mode 100644 index 000000000..c15e3709c --- /dev/null +++ 
b/deploy/test-dvo-writer.yaml @@ -0,0 +1,35 @@ +# Copyright 2021 Red Hat, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +# Bonfire deployment configuration +# Defines where to fetch the file that defines application configs + +appsFile: + host: gitlab + repo: insights-platform/cicd-common + path: bonfire_configs/ephemeral_apps.yaml + +apps: +- name: ccx-data-pipeline + components: + - name: dvo-writer + host: local + repo: . + path: deploy/dvo-writer.yaml + parameters: + ENV_NAME: env-ocm + IMAGE_TAG: latest + PG_PARAMS: "sslmode=disable" diff --git a/docker-compose.yml b/docker-compose.yml index d371f9c55..3cc9a365d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -18,8 +18,9 @@ services: ports: - 5432:5432 image: registry.redhat.io/rhscl/postgresql-10-rhel7 + platform: linux/x86_64 environment: - POSTGRESQL_USER=user - POSTGRESQL_PASSWORD=password - - POSTGRESQL_ADMIN_PASSWORD=admin - - POSTGRESQL_DATABASE=aggregator \ No newline at end of file + - POSTGRESQL_ADMIN_PASSWORD=postgres + - POSTGRESQL_DATABASE=aggregator diff --git a/docs/architecture.md b/docs/architecture.md index 20f517275..55f17eca8 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -6,30 +6,37 @@ nav_order: 1 Aggregator service consists of three main parts: -1. Consumer that reads (consumes) Insights OCP messages from specified message broker. Usually Kafka -broker is used but it might be possible to develop a interface for different broker. Insights -2. 
OCP messages are basically encoded in JSON and contain results generated by rule engine. -3. HTTP or HTTPS server that exposes REST API endpoints that can be used to read list of -organizations, list of clusters, read rules results for selected cluster etc. Additionally, -basic metrics are exposed as well. Those metrics is configured to be consumed by Prometheus and -visualized by Grafana. -4. Storage backend which is some instance of SQL database. Currently SQLite3 and PostgreSQL are -fully supported, but more SQL databases might be added later. +1. Consumer that reads (consumes) Insights OCP messages from specified message + broker. Usually Kafka broker is used but it might be possible to develop an + interface for different broker. Insights OCP messages are basically encoded + in JSON and contain results generated by rule engine. A different consumer can + be selected to consume and process DVO Recommendations. +2. HTTP or HTTPS server that exposes REST API endpoints that can be used to + read list of organizations, list of clusters, read rules results for + selected cluster etc. Additionally, basic metrics are exposed as well. Those + metrics are configured to be consumed by Prometheus and visualized by + Grafana. +3. Storage backend which is some instance of SQL database or Redis storage. + Currently only PostgreSQL is fully supported, but more SQL databases might + be added later. ## Whole data flow ![data_flow]({{ "assets/customer-facing-services-architecture.png" | relative_url}}) -1. Event about new data from insights operator is consumed from Kafka. That event contains (among -other things) URL to S3 Bucket -2. Insights operator data is read from S3 Bucket and Insights rules are applied to that data -3. Results (basically organization ID + cluster name + insights results JSON) are stored back into -Kafka, but into different topic -4. That results are consumed by Insights rules aggregator service that caches them -5.
The service provides such data via REST API to other tools, like OpenShift Cluster Manager web -UI, OpenShift console, etc. - -Optionally, an organization allowlist can be enabled by the configuration variable +1. Event about new data from insights operator is consumed from Kafka. That + event contains (among other things) URL to S3 Bucket +2. Insights operator data is read from S3 Bucket and Insights OCP rules are + applied to that data. Alternatively DVO rules are applied to the same data. +3. Results (basically organization ID + cluster name + insights OCP + recommendations JSON or DVO recommendations) are stored back into Kafka, but + into different topic +4. That results are consumed by Insights rules aggregator service that caches + them (i.e. stores them into selected database). +5. The service provides such data via REST API to other tools, like OpenShift + Cluster Manager web UI, OpenShift console, etc. + +Optionally, a so called organization allowlist can be enabled by the configuration variable `enable_org_allowlist`, which enables processing of a .csv file containing organization IDs (path specified by the config variable `org_allowlist`) and allows report processing only for these organizations. 
This feature is disabled by default, and might be removed altogether in the near diff --git a/docs/authentication.md b/docs/authentication.md index dab1a225b..d1cd3e0c6 100644 --- a/docs/authentication.md +++ b/docs/authentication.md @@ -19,7 +19,6 @@ Authentication is working through `x-rh-identity` token which is provided by 3sc "last_name": "Doe", "is_active": true, "is_org_admin": false, - "is_internal": false, "locale": "en_US" }, "internal": { diff --git a/docs/ci.md b/docs/ci.md index 7c7791fc9..64594bde5 100644 --- a/docs/ci.md +++ b/docs/ci.md @@ -25,6 +25,7 @@ cyclomatic complexity > 9 * `abcgo` to measure ABC metrics for Go source code and check if the metrics does not exceed specified threshold * `golangci-lint` as Go linters aggregator with lot of linters enabled: https://golangci-lint.run/usage/linters/ +* BDD tests that checks the overall Insights Results Aggregator behaviour. Please note that all checks mentioned above have to pass for the change to be merged into master branch. diff --git a/docs/clowder.md b/docs/clowder.md index 76f480b47..6bda83220 100644 --- a/docs/clowder.md +++ b/docs/clowder.md @@ -5,7 +5,7 @@ nav_order: 3 # Clowder configuration -As the rest of the services deployed in the Console RedHat platform, the +As the rest of the services deployed in the Console Red Hat platform, the Insights Results Aggregator DB Writer should update its configuration using the relevant values extracted from the Clowder configuration file. @@ -27,10 +27,12 @@ configuration. # Insights Results Aggregator specific relevant values -This service is running in 2 different modes in the platform: +This service is running in 3 different modes in the platform: - DB Writer: the service connects to Kafka to receive messages in a - specific topic and write the results in a database. + specific topic and write the results into a SQL database. 
+- Cache Writer: the service connects to Kafka to receive messages in a + specific topic and write the results into Redis. - Results Aggregator: expose the database stored data into several API endpoints. @@ -39,4 +41,6 @@ different: - DB Writer needs to update its Kafka access configuration and its DB access configuration in order to work. +- Cache Writer needs to update its Kafka access configuration and its DB + access configuration in order to work. - Results Aggregator just need to update its DB access configuration. diff --git a/docs/configuration.md b/docs/configuration.md index 3a18e496c..203531e6c 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -17,9 +17,8 @@ but it can be overwritten by `INSIGHTS_RESULTS_AGGREGATOR_CONFIG_FILE` env var. Also each key in config can be overwritten by corresponding env var. For example if you have config ```toml -[storage] -db_driver = "sqlite3" -sqlite_datasource = "./aggregator.db" +[ocp_recommendations_storage] +db_driver = "postgres" pg_username = "user" pg_password = "password" pg_host = "localhost" @@ -32,8 +31,8 @@ type = "sql" and environment variables ```shell -INSIGHTS_RESULTS_AGGREGATOR__STORAGE__DB_DRIVER="postgres" -INSIGHTS_RESULTS_AGGREGATOR__STORAGE__PG_PASSWORD="your secret password" +INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__DB_DRIVER="postgres" +INSIGHTS_RESULTS_AGGREGATOR__OCP_RECOMMENDATIONS_STORAGE__PG_PASSWORD="your secret password" ``` the actual driver will be postgres with password "your secret password" @@ -58,7 +57,7 @@ Broker configuration is in section `[broker]` in config file ```toml [broker] -address = "localhost:9092" +addresses = "localhost:9092" security_protocol = "" cert_path = "" sasl_mechanism = "" @@ -75,7 +74,7 @@ org_allowlist_file = "" enable_org_allowlist = false ``` -* `address` is an address of kafka broker (DEFAULT: "") +* `addresses` is a comma separated list of addresses of Kafka brokers; e.g kafka:9093,localhost:9092,kafka_2:9092 * 
`security_protocol` is a value for the `security.protocol` Kafka configuration. Defaults to "" * `cert_path` is a path to a file containing an SSL certificate, only used if `secutiy_protocol` is properly set to `SSL` * `sasl_mechanism` is the SASL authentication mechanism to use when `SASL_SSL` is set as `security_protocol` @@ -96,7 +95,7 @@ consuming will be started from the most recent message (DEFAULT: false) Option names in env configuration: -* `address` - INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESS +* `addresses` - INSIGHTS_RESULTS_AGGREGATOR__BROKER__ADDRESSES * `security_protocol` - INSIGHTS_RESULTS_AGGREGATOR__BROKER__SECURITY_PROTOCOL * `cert_path` - INSIGHTS_RESULTS_AGGREGATOR__BROKER__CERT_PATH * `sasl_mechanism` - INSIGHTS_RESULTS_AGGREGATOR__BROKER__SASL_MECHANISM @@ -174,6 +173,48 @@ debug = false you can add `$HOSTNAME` to the stream name so that they aren't writing to the same stream at once * `debug` is an option to enable debug output of cloudwatch logging +## Storage configuration + +Two storage backends can be configured separately: + +* Storage for OCP recommendations +* Storage for DVO recommendations + +For each storage, specific section in configuration file is used: + +```toml +[ocp_recommendations_storage] +db_driver = "postgres" +pg_username = "user" +pg_password = "password" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "" +log_sql_queries = true +type = "sql" + +[dvo_recommendations_storage] +db_driver = "postgres" +pg_username = "user" +pg_password = "password" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "" +log_sql_queries = true +type = "sql" +``` + +Actually used storage backend is selected by the following configuration option: + +```toml +[storage_backend] +use = "ocp_recommendations" +``` + +By default OCP recommendations storage is selected if no backend is configured. 
+ ## Redis configuration Redis configuration is in section `[redis]` in config file diff --git a/docs/database.md b/docs/database.md index 2aab0cae2..0bbeeca8e 100644 --- a/docs/database.md +++ b/docs/database.md @@ -13,12 +13,13 @@ no-ops (empty operations). In this mode, no DB-related operation will fail. ## PostgreSQL configuration -To establish connection to the PostgreSQL instance provided by the minimal stack in -`docker-compose.yml` for local setup, the following configuration options need to be changed in -`storage` section of `config.toml`: +To establish connection to the PostgreSQL instance provided by the minimal +stack in `docker-compose.yml` for local setup, the following configuration +options need to be changed in `ocp_recommendations_storage`, +`dvo_recommendations_storage` and `storage_backend` sections of `config.toml`: ```toml -[storage] +[ocp_recommendations_storage] db_driver = "postgres" pg_username = "user" pg_password = "password" @@ -26,6 +27,18 @@ pg_host = "localhost" pg_port = 55432 pg_db_name = "aggregator" pg_params = "sslmode=disable" + +[dvo_recommendations_storage] +db_driver = "postgres" +pg_username = "user" +pg_password = "password" +pg_host = "localhost" +pg_port = 55432 +pg_db_name = "aggregator" +pg_params = "sslmode=disable" + +[storage_backend] +use = "ocp_recommendations_storage" ``` ## Redis configuration diff --git a/docs/db_retention_policy.md b/docs/db_retention_policy.md index 39531e72e..0df5343b1 100644 --- a/docs/db_retention_policy.md +++ b/docs/db_retention_policy.md @@ -16,7 +16,7 @@ layout: page ## List of tables -All tables that are stored in external data pipeline database: +All tables that are stored in external data pipeline database (OCP Recommendations): ``` Schema | Name | Type diff --git a/docs/db_structure.md b/docs/db_structure.md index 8a023fc0a..1b5bc72b1 100644 --- a/docs/db_structure.md +++ b/docs/db_structure.md @@ -37,8 +37,8 @@ for more details about tables, indexes, and keys. 
This table is used as a cache for reports consumed from broker. Size of this table (i.e. number of records) scales linearly with the number of clusters, -because only latest report for given cluster is stored (it is guarantied by DB -constraints). That table has defined compound key `org_id+cluster`, +because only latest valid report for given cluster is stored (it is guaranteed +by DB constraints). That table has defined compound key `org_id+cluster`, additionally `cluster` name needs to be unique across all organizations. Additionally `kafka_offset` is used to speedup consuming messages from Kafka topic in case the offset is lost due to issues in Kafka, Kafka library, or @@ -56,6 +56,25 @@ CREATE TABLE report ( ) ``` +We consider a report as valid if it includes all the required fields described +in the [agreed-upon report structure]( +https://redhatinsights.github.io/insights-data-schemas/external-pipeline/ccx_data_pipeline.html#format-of-the-report-node). + +If any of those fields is missing, we interpret it as a malformed report, most +probably due to an error when the Insights Core engine processed the archive. In +that situation, the processing of the archive is aborted without storing any new +information in the databases. Therefore, it is important to understand the +difference between: +- an **empty** report, which is stored in the databases, as it indicates that any +previously found issue in the cluster has been resolved or is no longer happening. +- a **malformed** report, which can be empty but is missing required attributes, +and is not stored in the database as there is no guarantee that it represents the +latest state of the cluster. + +To learn more about Insights Core processing, please refer to the [Red Hat Insights Core]( +https://insights-core.readthedocs.io/en/latest/intro.html#id1) documentation. + + ## Table `rule_hit` This table represents the content for Insights rules to be displayed by OCM.
@@ -89,9 +108,11 @@ CREATE TABLE cluster_rule_user_feedback ( updated_at TIMESTAMP NOT NULL, PRIMARY KEY(cluster_id, rule_id, user_id, error_key), + CONSTRAINT cluster_rule_user_feedback_cluster_id_fkey FOREIGN KEY (cluster_id) REFERENCES report(cluster) ON DELETE CASCADE, + CONSTRAINT cluster_rule_user_feedback_rule_id_fkey FOREIGN KEY (rule_id) REFERENCES rule(module) ON DELETE CASCADE @@ -111,7 +132,7 @@ CREATE TABLE cluster_rule_toggle ( enabled_at TIMESTAMP NULL, updated_at TIMESTAMP NOT NULL, - CHECK (disabled >= 0 AND disabled <= 1), + disabled_check SMALLINT CHECK (disabled >= 0 AND disabled <= 1), PRIMARY KEY(cluster_id, rule_id, user_id) ) diff --git a/docs/documentation_for_developers.md b/docs/documentation_for_developers.md index 3aa1b58b6..33d2b1b0a 100644 --- a/docs/documentation_for_developers.md +++ b/docs/documentation_for_developers.md @@ -7,10 +7,9 @@ nav_order: 16 All packages developed in this project have documentation available on [GoDoc server](https://godoc.org/): * [entry point to the service](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator) -* [package `broker`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/broker) -* [package `consumer`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/consumer) -* [package `content`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/content) -* [package `logger`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/logger) +* [package `broker`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/conf) +* [package `conf`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/consumer) +* [package `consumer`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/content) * [package `metrics`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/metrics) * [package 
`migration`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/migration) * [package `producer`](https://godoc.org/github.com/RedHatInsights/insights-results-aggregator/producer) diff --git a/docs/references.md b/docs/references.md index b46395e72..3ab2d7afd 100644 --- a/docs/references.md +++ b/docs/references.md @@ -3,5 +3,9 @@ layout: page nav_order: 18 --- # References +- [Smart Proxy](https://github.com/RedHatInsights/smart-proxy) - [Insights Data Schemas](https://redhatinsights.github.io/insights-data-schemas/) - [Insights Results Aggregator Data](https://github.com/RedHatInsights/insights-results-aggregator-data) +- [Insights Results Aggregator Cleaner](https://github.com/RedHatInsights/insights-results-aggregator-cleaner) +- [Insights Results Aggregator Exporter](https://github.com/RedHatInsights/insights-results-aggregator-exporter) +- [Insights Content Service](https://github.com/RedHatInsights/insights-content-service) diff --git a/docs/testing.md b/docs/testing.md index 048fabe75..b2f765f1e 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -57,7 +57,6 @@ By default all logs from the application aren't shown, if you want to see them, To make a coverage report you need to start `./make-coverage.sh` tool with one of these arguments: -1. `unit-sqlite` unit tests with sqlite in memory database 1. `unit-posgres` unit tests with postgres database(don't forget to start `docker-compose up` with the DB) 1. `rest` REST API tests from `test.sh` file 1. `integration` Any external tests, for example from iqe-ccx-plugin. 
@@ -65,7 +64,7 @@ Only this option requires you to run tests manually and stop the script by `Ctrl For example: -`./make-coverage.sh unit-sqlite` will generate a report file `coverage.out` +`./make-coverage.sh unit-postgres` will generate a report file `coverage.out` which you can investigate by either of those commands: - `go tool cover -func=coverage.out` diff --git a/go.mod b/go.mod index 880d5ae3f..e4fe6d6df 100644 --- a/go.mod +++ b/go.mod @@ -1,85 +1,86 @@ module github.com/RedHatInsights/insights-results-aggregator -go 1.18 +go 1.20 require ( - github.com/BurntSushi/toml v0.3.1 - github.com/DATA-DOG/go-sqlmock v1.5.0 - github.com/RedHatInsights/insights-content-service v0.0.0-20230607085809-59604fce8997 - github.com/RedHatInsights/insights-operator-utils v1.24.9 + github.com/BurntSushi/toml v1.3.2 + github.com/DATA-DOG/go-sqlmock v1.5.2 + github.com/RedHatInsights/insights-operator-utils v1.25.4 github.com/RedHatInsights/insights-results-aggregator-data v1.3.9 github.com/RedHatInsights/insights-results-types v1.3.23 github.com/Shopify/sarama v1.27.1 - github.com/deckarep/golang-set v1.7.1 + github.com/deckarep/golang-set v1.8.0 github.com/gchaincl/sqlhooks v1.3.0 - github.com/go-redis/redismock/v9 v9.0.3 - github.com/google/uuid v1.3.0 - github.com/gorilla/mux v1.8.0 + github.com/go-redis/redismock/v9 v9.2.0 + github.com/google/uuid v1.6.0 + github.com/gorilla/mux v1.8.1 github.com/lib/pq v1.10.9 - github.com/mattn/go-sqlite3 v2.0.3+incompatible - github.com/prometheus/client_golang v1.15.1 - github.com/prometheus/client_model v0.3.0 - github.com/redhatinsights/app-common-go v1.6.3 - github.com/rs/zerolog v1.29.1 - github.com/spf13/viper v1.16.0 - github.com/stretchr/testify v1.8.3 + github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_model v0.6.0 + github.com/redhatinsights/app-common-go v1.6.7 + github.com/rs/zerolog v1.32.0 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 github.com/verdverm/frisby 
v0.0.0-20170604211311-b16556248a9a - golang.org/x/sync v0.1.0 + github.com/xdg/scram v1.0.5 + golang.org/x/sync v0.6.0 ) require ( github.com/RedHatInsights/cloudwatch v0.0.0-20210111105023-1df2bdfe3291 // indirect github.com/RedHatInsights/kafka-zerolog v1.0.0 // indirect - github.com/archdx/zerolog-sentry v0.0.1 // indirect - github.com/aws/aws-sdk-go v1.35.7 // indirect + github.com/archdx/zerolog-sentry v1.8.2 // indirect + github.com/aws/aws-sdk-go v1.50.16 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-simplejson v0.5.0 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/getkin/kin-openapi v0.22.1 // indirect - github.com/getsentry/sentry-go v0.6.1 // indirect + github.com/getsentry/sentry-go v0.24.1 // indirect github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-yaml/yaml v2.1.0+incompatible // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/golang/snappy v0.0.3 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.11.1 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/magiconair/properties v1.8.7 // 
indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mozillazg/request v0.8.0 // indirect - github.com/pelletier/go-toml v1.8.1 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pierrec/lz4 v2.5.2+incompatible // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect - github.com/redis/go-redis/v9 v9.0.5 // indirect + github.com/redis/go-redis/v9 v9.4.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/segmentio/kafka-go v0.4.10 // indirect - github.com/spf13/afero v1.9.5 // indirect - github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.4.2 // indirect - golang.org/x/crypto v0.9.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/xdg/stringprep v1.0.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + 
go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/h2non/gock.v1 v1.1.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect diff --git a/go.sum b/go.sum index 15df43c98..2866ad079 100644 --- a/go.sum +++ b/go.sum @@ -3,49 +3,24 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go 
v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod 
h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= @@ -57,17 +32,9 @@ github.com/RedHatInsights/cloudwatch v0.0.0-20200512151223-b0b55757a24b/go.mod h github.com/RedHatInsights/cloudwatch v0.0.0-20210111105023-1df2bdfe3291 h1:f2RIq2LvG0Nz7TrPYr8clzUPXIEf+Q3oDoCfAHym4/I= github.com/RedHatInsights/cloudwatch v0.0.0-20210111105023-1df2bdfe3291/go.mod h1:8l+HqU8iWM6hA9kSAHgY3ItSlpEsPr8fb2R0GBp9S0U= github.com/RedHatInsights/insights-content-service v0.0.0-20200619153839-23a428468a08/go.mod 
h1:CSzDwoJXMIPnRNOZfYn68YbXejRqSr0i4uqtB6oLYt4= -github.com/RedHatInsights/insights-content-service v0.0.0-20221024073309-fabee4bcb06e h1:vyw0nkzMTnBB4rP7z5HpdH1bWI4CywSvxHd+ECs8uTU= -github.com/RedHatInsights/insights-content-service v0.0.0-20221024073309-fabee4bcb06e/go.mod h1:9bnb8zbE7ocCmrvVpmIRgCOcb2ew7ovEcReMB953Pbo= -github.com/RedHatInsights/insights-content-service v0.0.0-20230529094240-4216523486cc h1:bi0AoZ7gQfSQcT/roa0cPIMA8kDMYGbxLgH9641ZtJQ= -github.com/RedHatInsights/insights-content-service v0.0.0-20230529094240-4216523486cc/go.mod h1:6nihzwhfwTZHM8ClPwISTntuYPf6W2UzcI+yPeUPMRk= -github.com/RedHatInsights/insights-content-service v0.0.0-20230607085809-59604fce8997 h1:gMdRIId8AbNqTtNyrL8vyKzuSLdmNfI6BgB85mioMYg= -github.com/RedHatInsights/insights-content-service v0.0.0-20230607085809-59604fce8997/go.mod h1:6nihzwhfwTZHM8ClPwISTntuYPf6W2UzcI+yPeUPMRk= github.com/RedHatInsights/insights-operator-utils v1.0.1/go.mod h1:gRzYBMY4csuOXgrxUuC10WUkz6STOm3mqVsQCb+AGOQ= github.com/RedHatInsights/insights-operator-utils v1.0.2-0.20200610143236-c868b2f93d2a/go.mod h1:0rhk13kn0BB+yKdfZjpMoQy0lXdXY3i4NJ1xGwGMcII= github.com/RedHatInsights/insights-operator-utils v1.4.1-0.20200729093922-bca68530a5ef/go.mod h1:F7KKAdWFR70H+3fa89ciXqimNycHdqO9HjLWRdZwOug= -github.com/RedHatInsights/insights-operator-utils v1.4.3 h1:QSMNFHvkluvVH6EJjuPe45r5U6PmaUcsDmiAgQ8gvMA= -github.com/RedHatInsights/insights-operator-utils v1.4.3/go.mod h1:F7KKAdWFR70H+3fa89ciXqimNycHdqO9HjLWRdZwOug= github.com/RedHatInsights/insights-operator-utils v1.6.2/go.mod h1:RW9Jq4LgqIkV3WY6AS2EkopYyZDIr5BNGiU5I75HryM= github.com/RedHatInsights/insights-operator-utils v1.6.7/go.mod h1:ott1/rkxcyQtK6XdYj1Ur3XGSYRAHTplJiV5RKkij2o= github.com/RedHatInsights/insights-operator-utils v1.8.3/go.mod h1:L6alrkNSM+uBzlQdIihhGnwTpdw+bD8i8Fdh/OE9rdo= @@ -75,13 +42,9 @@ github.com/RedHatInsights/insights-operator-utils v1.12.0/go.mod h1:mN5jURLpSG+j github.com/RedHatInsights/insights-operator-utils v1.21.0/go.mod 
h1:B2hizFGwXCc8MT34QqVJ1A8ANTyGQZQWXQw/gSCEsaU= github.com/RedHatInsights/insights-operator-utils v1.21.2/go.mod h1:3Pfsgsi7GCu2wgIqQlt1llpyQyyxsDWEGdgnPvadM40= github.com/RedHatInsights/insights-operator-utils v1.21.8/go.mod h1:qa1a8NdarIzcZkr5mu9fBw4OarOfg1qZFZC1vNGbyas= -github.com/RedHatInsights/insights-operator-utils v1.22.0/go.mod h1:4G1aWUV3SBc5tRflpAZX2BjoWB8afxXtSutg+5/sLE8= -github.com/RedHatInsights/insights-operator-utils v1.24.5 h1:db3rOWDJXLoTHlCgnXlPktr5hlE5yYV0V7xh2lr+Hqo= github.com/RedHatInsights/insights-operator-utils v1.24.5/go.mod h1:7qR/8rlMdiqoXAkZyQ5JhVrVNCa6SBwNUt4KMq/17j4= -github.com/RedHatInsights/insights-operator-utils v1.24.7 h1:Itxd19oqE6+4RYX5+EAf66ZLB32tRaJNHykPwKQ9Dtw= -github.com/RedHatInsights/insights-operator-utils v1.24.7/go.mod h1:+PYvAKGWx/jXEDhvAkmA+RkxcFFO6RFn9QEgPwRno10= -github.com/RedHatInsights/insights-operator-utils v1.24.9 h1:z4kaXy2W/B19oetL43HIjPpZHD6u4kedcU1uryOlqD4= -github.com/RedHatInsights/insights-operator-utils v1.24.9/go.mod h1:4PpsFOgDkHvZ7K+IWGOALkdmAbXp4A3PFN6bj+mNK3M= +github.com/RedHatInsights/insights-operator-utils v1.25.4 h1:H5iXuvsxFkjmGmAefkv6b+FxQWq9b0me7FPa7Fx8M1Y= +github.com/RedHatInsights/insights-operator-utils v1.25.4/go.mod h1:GGa8HtCNKyfugkNkpVojp+mO24N27npbgi85QkpPteA= github.com/RedHatInsights/insights-results-aggregator v0.0.0-20200604090056-3534f6dd9c1c/go.mod h1:7Pc15NYXErx7BMJ4rF1Hacm+29G6atzjhwBpXNFMt+0= github.com/RedHatInsights/insights-results-aggregator-data v0.0.0-20200825113234-e84e924194bc/go.mod h1:DcDgoCCmBuUSKQOGrTi0BfFLdSjAp/KxIwyqKUd46sM= github.com/RedHatInsights/insights-results-aggregator-data v0.0.0-20201014142608-de97c4b07d5c/go.mod h1:x8IvreR2g24veCKVMXDPOR6a0D86QK9UCBfi5Xm5Gnc= @@ -91,18 +54,10 @@ github.com/RedHatInsights/insights-results-aggregator-data v1.1.2/go.mod h1:rbic github.com/RedHatInsights/insights-results-aggregator-data v1.3.1/go.mod h1:Ylo2cWFmraBzkwKLew54kZSsUTgeVvFJdIi/oRkdxtc= github.com/RedHatInsights/insights-results-aggregator-data 
v1.3.2/go.mod h1:E1UaB+IjJ/muxvMstVoqJrB82zVKNykjTtCiM3tMHoM= github.com/RedHatInsights/insights-results-aggregator-data v1.3.3/go.mod h1:udHNC7lBxYnu9AqMahABqvuclCzWUWSkbacQbUaehfI= -github.com/RedHatInsights/insights-results-aggregator-data v1.3.6 h1:RcZsn25t+km9/VBAcbks5oLx21HI0aQvLFldgEja5NY= -github.com/RedHatInsights/insights-results-aggregator-data v1.3.6/go.mod h1:tOwmlIB5irSv1mTLgONuLrsbTp4vokl4ClJFClrrXX0= -github.com/RedHatInsights/insights-results-aggregator-data v1.3.8 h1:dYWiAdeNRb7iutWxNQBuuVrWH/QBqWx1g7RhOk+SjEw= -github.com/RedHatInsights/insights-results-aggregator-data v1.3.8/go.mod h1:YOtil8P/1Zy/XewZejn4hjXKrcvmaxIqhU7mjwCGo/o= github.com/RedHatInsights/insights-results-aggregator-data v1.3.9 h1:D6JtouoQs606xOIQaQVmAFi+tgw/UEv/POarE46VdEY= github.com/RedHatInsights/insights-results-aggregator-data v1.3.9/go.mod h1:sL0aXaqEq/EzjMEj8QHv13RjfnSXvv2f2q/7OHSOVCQ= github.com/RedHatInsights/insights-results-types v1.2.0/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= -github.com/RedHatInsights/insights-results-types v1.3.7/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= -github.com/RedHatInsights/insights-results-types v1.3.20 h1:02zlYmyZWkUCryHYgShrfYYO0ZJK3oYV5DaxeP5IZ2g= github.com/RedHatInsights/insights-results-types v1.3.20/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= -github.com/RedHatInsights/insights-results-types v1.3.21/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= -github.com/RedHatInsights/insights-results-types v1.3.22 h1:bQgFSG3RbcusnUjVti6l9C0IN4NzG1Q0IRQut9FdD8o= github.com/RedHatInsights/insights-results-types v1.3.22/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= github.com/RedHatInsights/insights-results-types v1.3.23 h1:F0QlBqZup7KKEzxe84/Wmp9kunuoHbQf9Ir3kLpUBAk= github.com/RedHatInsights/insights-results-types v1.3.23/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= @@ -127,8 +82,9 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/archdx/zerolog-sentry v0.0.1 h1:AUDjd1ALUK1jCVsOrOMzKv7hZNcid7F73DoZNM3m1PA= github.com/archdx/zerolog-sentry v0.0.1/go.mod h1:dAIUEqBAhDI/yVS3nqOr7VS9BsvHJ5btxoGFEE2RmGk= +github.com/archdx/zerolog-sentry v1.8.2 h1:zS8n0+H7SsG161RN8dP47CSsdyrhODdo9LEDOPXJhXI= +github.com/archdx/zerolog-sentry v1.8.2/go.mod h1:XrFHGe1CH5DQk/XSySu/IJSi5C9XR6+zpc97zVf/c4c= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -139,10 +95,10 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.30.25/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.35.7 h1:FHMhVhyc/9jljgFAcGkQDYjpC9btM0B8VfkLBfctdNE= github.com/aws/aws-sdk-go v1.35.7/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go v1.50.16 h1:/KuHK+Sadp9BKXWWtMhPtBdj+PLIFCnQZxQnsuLhxKc= +github.com/aws/aws-sdk-go v1.50.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod 
h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -155,6 +111,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/sarama-cluster v2.1.10+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/buger/jsonparser v1.0.0/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= @@ -165,20 +123,12 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash v1.0.0/go.mod h1:fX/lfQBkSCDXZSUgv6jVIu/EVA3/JNseAX5asI4c4T4= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= @@ -198,10 +148,12 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -220,11 +172,7 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod 
h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= @@ -236,13 +184,12 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= -github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fzipp/gocyclo v0.0.0-20150627053110-6acd4345c835/go.mod h1:BjL/N0+C+j9uNX+1xcNuM9vdSIcXCZrQZUYbXOFbgN8= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gchaincl/sqlhooks v1.3.0 h1:yKPXxW9a5CjXaVf2HkQn6wn7TZARvbAOAelr3H8vK2Y= @@ -250,36 +197,31 @@ github.com/gchaincl/sqlhooks v1.3.0/go.mod h1:9BypXnereMT0+Ys8WGWHqzgkkOfHIhyeUC github.com/getkin/kin-openapi v0.20.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw= github.com/getkin/kin-openapi v0.22.1 h1:ODA1olTp175o//NfHko/uCAAhwUSfm5P4+K52XvTg4w= github.com/getkin/kin-openapi v0.22.1/go.mod 
h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw= -github.com/getsentry/sentry-go v0.6.1 h1:K84dY1/57OtWhdyr5lbU78Q/+qgzkEyGc/ud+Sipi5k= github.com/getsentry/sentry-go v0.6.1/go.mod h1:0yZBuzSvbZwBnvaF9VwZIMen3kXscY8/uasKtAX1qG8= +github.com/getsentry/sentry-go v0.24.1 h1:W6/0GyTy8J6ge6lVCc94WB6Gx2ZuLrgopnn9w8Hiwuk= +github.com/getsentry/sentry-go v0.24.1/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-redis/redismock/v9 v9.0.3 h1:mtHQi2l51lCmXIbTRTqb1EiHYe9tL5Yk5oorlSJJqR0= -github.com/go-redis/redismock/v9 v9.0.3/go.mod h1:F6tJRfnU8R/NZ0E+Gjvoluk14MqMC5ueSZX6vVQypc0= +github.com/go-redis/redismock/v9 v9.2.0 h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw= +github.com/go-redis/redismock/v9 v9.2.0/go.mod h1:18KHfGDK4Y6c2R0H38EUGWAdc7ZQS9gfYxc94k7rWT0= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= @@ -293,23 +235,15 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -318,17 +252,11 @@ 
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -336,47 +264,32 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/css v1.0.0/go.mod 
h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -429,8 +342,6 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -454,11 +365,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go 
v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= @@ -472,13 +379,15 @@ github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6i github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.1 h1:bPb7nMRdOZYDrpPMTA3EInUQrdgoBinqUuSwlGdKDdE= github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -486,8 +395,8 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -504,39 +413,33 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties 
v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY= github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= -github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= -github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= -github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -546,7 +449,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod 
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -554,7 +456,6 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mozillazg/request v0.8.0 h1:TbXeQUdBWr1J1df5Z+lQczDFzX9JD71kTCl7Zu/9rNM= github.com/mozillazg/request v0.8.0/go.mod h1:weoQ/mVFNbWgRBtivCGF1tUT9lwneFesues+CleXMWc= @@ -573,8 +474,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -583,9 +484,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -600,10 +503,9 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= 
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -618,9 +520,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -630,20 +532,16 @@ github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= 
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -653,12 +551,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -668,34 +562,34 @@ 
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/redhatinsights/app-common-go v1.5.1/go.mod h1:SqgG5JkX/RNlk2d+sXamIFxhOIvWLgCBr8uK6q70ESk= -github.com/redhatinsights/app-common-go v1.6.3 h1:HhjDKLBqQM5i8Ii58WLi5hG+lTNaKgpAEnJ2vdVUJtw= -github.com/redhatinsights/app-common-go v1.6.3/go.mod h1:6gzRyg8ZyejwMCksukeAhh2ZXOB3uHSmBsbP06fG2PQ= -github.com/redis/go-redis/v9 v9.0.5 h1:CuQcn5HIEeK7BgElubPP8CGtE0KakrnbBSTLjathl5o= -github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= +github.com/redhatinsights/app-common-go v1.6.7 h1:cXWW0F6ZW53RLRr54gn7Azo9CLTysYOmFDR0D0Qd0Fs= 
+github.com/redhatinsights/app-common-go v1.6.7/go.mod h1:6gzRyg8ZyejwMCksukeAhh2ZXOB3uHSmBsbP06fG2PQ= +github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk= +github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= -github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs= github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= -github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= -github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= 
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -706,31 +600,25 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.5.1 h1:VHu76Lk0LSP1x254maIu2bplkWpfBWI+B+6fdoZprcg= -github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -740,18 +628,16 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.2-0.20210415161207-7fdb267c730d h1:0tCNUHawuiHzvZQ9rWjDcDK8Vzj8YOnb7GNBmk+NY4A= -github.com/spf13/viper v1.7.2-0.20210415161207-7fdb267c730d/go.mod h1:omGOaCUGS+skIJZczeljhvaqA6JN5Bom44YnqMoMi9I= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -760,14 +646,12 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tisnik/go-capture v1.0.1/go.mod h1:NArgKXuvcG6gOW2SQoPGKy6TuiKBttQ2ZV0/zC4zVaY= github.com/tj/go-gracefully v0.0.0-20141227061038-005c1d102f1b/go.mod h1:uqlTeGUUfRdQvlQGkv+DYe3lLST3DionEwMA9YAYibY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -789,8 +673,9 @@ 
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPU github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/verdverm/frisby v0.0.0-20170604211311-b16556248a9a h1:Mt+KWT4h97wIDQahX1eD3OLkmc/fGbLy7EndiE85kMQ= github.com/verdverm/frisby v0.0.0-20170604211311-b16556248a9a/go.mod h1:Z+jvFzFlZ6eHAKMfi8PZZphUtg4S0gc2EZYOL9UnWgA= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= +github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -802,12 +687,7 @@ github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmv github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -817,15 +697,16 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -844,23 +725,15 @@ golang.org/x/crypto 
v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -871,21 +744,13 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= 
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -906,72 +771,30 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -997,76 +820,33 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201005172224-997123666555/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1095,41 +875,11 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1140,28 +890,11 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1172,33 +905,7 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1208,18 +915,8 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1228,32 +925,25 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= -gopkg.in/h2non/gock.v1 v1.0.15 h1:SzLqcIlb/fDfg7UvukMpNcWsu7sI5tWwL+KCATZqks0= gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1270,6 +960,7 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mgo.v2 
v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1291,8 +982,6 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/local_storage/populate_db_with_mock_data.sh b/local_storage/populate_db_with_mock_data.sh index 586c63ff9..a20afdd4b 100755 --- a/local_storage/populate_db_with_mock_data.sh +++ b/local_storage/populate_db_with_mock_data.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2020 Red Hat, Inc +# Copyright 2020, 2023 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,8 +14,7 @@ # limitations under the License. 
-DATABASE=test.db - SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )" -sqlite3 "${SCRIPT_DIR}/../${DATABASE}" < "${SCRIPT_DIR}/test_data_sqlite.sql" +psql "dbname=aggregator user=postgres password=postgres host=localhost sslmode=disable" -f "${SCRIPT_DIR}/test_data.sql" + diff --git a/local_storage/test_data.sql b/local_storage/test_data.sql new file mode 100755 index 000000000..84283dd30 --- /dev/null +++ b/local_storage/test_data.sql @@ -0,0 +1,38 @@ +-- Delete from report +DELETE FROM report; + +-- Insert into report +INSERT INTO report (org_id, cluster, report, reported_at, last_checked_at) +VALUES + (1, '00000000-0000-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (1, '00000000-0000-0000-ffff-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (1, '00000000-0000-0000-0000-ffffffffffff', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (2, '00000000-ffff-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (2, '00000000-0000-ffff-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (3, 'aaaaaaaa-0000-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (3, 'addddddd-0000-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (4, 'addddddd-bbbb-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP), + (4, 'addddddd-bbbb-cccc-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); + +-- Delete from recommendation +DELETE FROM recommendation; + +-- Insert into recommendation +INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key, rule_id, created_at) +VALUES + (1, '11111111-1111-1111-1111-111111111111', 'ccx_rules_ocp.external.rules.node_installer_degraded', 'ek1', 'ccx_rules_ocp.external.rules.node_installer_degraded|ek1', CURRENT_TIMESTAMP), + (2, '22222222-2222-2222-2222-222222222222', 'ccx_rules_ocp.external.rules.node_installer_degraded', 'ek1', 'ccx_rules_ocp.external.rules.node_installer_degraded|ek1', CURRENT_TIMESTAMP), 
+ (3, '33333333-3333-3333-3333-333333333333', 'ccx_rules_ocp.external.rules.node_installer_degraded', 'ek1', 'ccx_rules_ocp.external.rules.node_installer_degraded|ek1', CURRENT_TIMESTAMP); + +-- Delete from report_info +DELETE FROM report_info; + +-- Insert into report_info +INSERT INTO report_info (org_id, cluster_id, version_info) +VALUES + (1, '11111111-1111-1111-1111-111111111111', '1.0'), + (2, '22222222-2222-2222-2222-222222222222', ''); + +-- Delete from cluster_rule_user_feedback +DELETE FROM cluster_rule_user_feedback; + diff --git a/local_storage/test_data_sqlite.sql b/local_storage/test_data_sqlite.sql deleted file mode 100644 index ea48bd27b..000000000 --- a/local_storage/test_data_sqlite.sql +++ /dev/null @@ -1,38 +0,0 @@ --- Copyright 2020 Red Hat, Inc --- --- Licensed under the Apache License, Version 2.0 (the "License"); --- you may not use this file except in compliance with the License. --- You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. 
- -delete from report; - -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (1, '00000000-0000-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (1, '00000000-0000-0000-ffff-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (1, '00000000-0000-0000-0000-ffffffffffff', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (2, '00000000-ffff-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (2, '00000000-0000-ffff-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (3, 'aaaaaaaa-0000-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (3, 'addddddd-0000-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (4, 'addddddd-bbbb-0000-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); -insert into report (org_id, cluster, report, reported_at, last_checked_at) values (4, 'addddddd-bbbb-cccc-0000-000000000000', '{}', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP); - -delete from recommendation; - -INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key, rule_id, created_at) VALUES (1, '11111111-1111-1111-1111-111111111111', 'ccx_rules_ocp.external.rules.node_installer_degraded', 'ek1', 'ccx_rules_ocp.external.rules.node_installer_degraded|ek1', CURRENT_TIMESTAMP); -INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key, rule_id, created_at) VALUES (2, 
'22222222-2222-2222-2222-222222222222', 'ccx_rules_ocp.external.rules.node_installer_degraded', 'ek1', 'ccx_rules_ocp.external.rules.node_installer_degraded|ek1', CURRENT_TIMESTAMP); -INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key, rule_id, created_at) VALUES (3, '33333333-3333-3333-3333-333333333333', 'ccx_rules_ocp.external.rules.node_installer_degraded', 'ek1', 'ccx_rules_ocp.external.rules.node_installer_degraded|ek1', CURRENT_TIMESTAMP); - -delete from report_info; - -INSERT INTO report_info(org_id, cluster_id, version_info) VALUES (1, '11111111-1111-1111-1111-111111111111', '1.0'); -INSERT INTO report_info(org_id, cluster_id, version_info) VALUES (2, '22222222-2222-2222-2222-222222222222', ''); - -delete from cluster_rule_user_feedback; diff --git a/make-coverage.sh b/make-coverage.sh index f7dfbe682..04149bd2e 100755 --- a/make-coverage.sh +++ b/make-coverage.sh @@ -20,10 +20,6 @@ TIMEOUT_INTEGRATION=1440m rm coverage.out 2>/dev/null case $1 in -"unit-sqlite") - echo "Running unit tests with SQLite in memory..." - go test -timeout $TIMEOUT -coverprofile=coverage.out ./... 1>&2 - ;; "unit-postgres") export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB="postgres" export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS="admin" @@ -64,7 +60,7 @@ case $1 in go test -timeout $TIMEOUT_INTEGRATION -v -tags testrunmain -run "^TestRunMain$" -coverprofile=coverage.out -coverpkg="./..." . 1>&2 ;; *) - echo 'Please, choose "unit-sqlite", "unit-postgres", "rest" or "integration"' + echo 'Please, choose "unit-postgres", "rest" or "integration"' echo "Aggregator's output will be redirected to stderr." 
echo "Coverage is saved to 'coverage.out' file" exit 1 diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index 4262d94d4..83f5572d7 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -60,10 +60,10 @@ func getCounterValue(counter prometheus.Counter) float64 { return pb.GetCounter().GetValue() } -// TestConsumedMessagesMetric tests that consumed messages metric works -func TestConsumedMessagesMetric(t *testing.T) { +// TestConsumedOCPMessagesMetric tests that consumed messages metric works +func TestConsumedOCPMessagesMetric(t *testing.T) { helpers.RunTestWithTimeout(t, func(t testing.TB) { - mockConsumer, closer := ira_helpers.MustGetMockKafkaConsumerWithExpectedMessages( + mockConsumer, closer := ira_helpers.MustGetMockOCPRulesConsumerWithExpectedMessages( t, testTopicName, testOrgAllowlist, []string{testdata.ConsumerMessage, testdata.ConsumerMessage}, ) defer closer() @@ -80,7 +80,7 @@ func TestConsumedMessagesMetric(t *testing.T) { func TestProducedMessagesMetric(t *testing.T) { brokerCfg := broker.Configuration{ - Address: "localhost:1234", + Addresses: "localhost:1234", Topic: "consumer-topic", PayloadTrackerTopic: "payload-tracker-topic", Group: "test-group", @@ -115,7 +115,7 @@ func TestProducedMessagesMetric(t *testing.T) { } func TestWrittenReportsMetric(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // other tests may run at the same process diff --git a/migration/dvomigrations/actual_migrations_test.go b/migration/dvomigrations/actual_migrations_test.go new file mode 100644 index 000000000..0f792ea3e --- /dev/null +++ b/migration/dvomigrations/actual_migrations_test.go @@ -0,0 +1,41 @@ +// Copyright 2020, 2021, 2022 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dvomigrations_test + +import ( + "testing" + + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/migration/dvomigrations" + "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" +) + +func TestAllMigrations(t *testing.T) { + db, closer := helpers.PrepareDBDVO(t) + defer closer() + + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + + err := migration.InitInfoTable(dbConn, dbSchema) + helpers.FailOnError(t, err) + + err = migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, db.GetMaxVersion(), dvomigrations.UsableDVOMigrations) + helpers.FailOnError(t, err) + + // downgrade back to 0 + err = migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, migration.Version(0), dvomigrations.UsableDVOMigrations) + helpers.FailOnError(t, err) +} diff --git a/migration/dvomigrations/dvo_migrations.go b/migration/dvomigrations/dvo_migrations.go new file mode 100644 index 000000000..11bec316f --- /dev/null +++ b/migration/dvomigrations/dvo_migrations.go @@ -0,0 +1,10 @@ +package dvomigrations + +import "github.com/RedHatInsights/insights-results-aggregator/migration" + +// UsableDVOMigrations contains all usable DVO-related migrations +var UsableDVOMigrations = []migration.Migration{ + mig0001CreateDVOReport, + mig0002CreateDVOReportIndexes, + mig0003CCXDEV12602DeleteBuggyRecords, +} diff --git a/migration/dvomigrations/mig_0001_create_dvo_report.go b/migration/dvomigrations/mig_0001_create_dvo_report.go new file mode 100644 index 
000000000..58d96cdcf --- /dev/null +++ b/migration/dvomigrations/mig_0001_create_dvo_report.go @@ -0,0 +1,65 @@ +/* +Copyright © 2020 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dvomigrations + +import ( + "database/sql" + + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +var mig0001CreateDVOReport = migration.Migration{ + StepUp: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(` + CREATE TABLE dvo.dvo_report ( + org_id INTEGER NOT NULL, + cluster_id VARCHAR NOT NULL, + namespace_id VARCHAR NOT NULL, + namespace_name VARCHAR, + report TEXT, + recommendations INTEGER NOT NULL, + objects INTEGER NOT NULL, + reported_at TIMESTAMP, + last_checked_at TIMESTAMP, + PRIMARY KEY(org_id, cluster_id, namespace_id) + ); + `) + if err != nil { + return err + } + + _, err = tx.Exec(` + COMMENT ON TABLE dvo.dvo_report IS 'This table is used as a cache for DVO reports. 
Only the latest report for a given cluster is stored.'; + COMMENT ON COLUMN dvo.dvo_report.org_id IS 'organization ID'; + COMMENT ON COLUMN dvo.dvo_report.cluster_id IS 'cluster UUID'; + COMMENT ON COLUMN dvo.dvo_report.namespace_id IS 'namespace UUID (always set)'; + COMMENT ON COLUMN dvo.dvo_report.namespace_name IS 'namespace name (might be null - not set)'; + COMMENT ON COLUMN dvo.dvo_report.report IS 'report structure stored in JSON format'; + COMMENT ON COLUMN dvo.dvo_report.recommendations IS 'number of recommendations stored in report'; + COMMENT ON COLUMN dvo.dvo_report.objects IS 'number of objects stored in report'; + COMMENT ON COLUMN dvo.dvo_report.reported_at IS 'timestamp, same meaning as in report table'; + COMMENT ON COLUMN dvo.dvo_report.last_checked_at IS 'timestamp, same meaning as in report table'; + `) + + return err + }, + StepDown: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(`DROP TABLE dvo.dvo_report;`) + return err + }, +} diff --git a/migration/dvomigrations/mig_0002_create_dvo_report_indexes.go b/migration/dvomigrations/mig_0002_create_dvo_report_indexes.go new file mode 100644 index 000000000..9494429d7 --- /dev/null +++ b/migration/dvomigrations/mig_0002_create_dvo_report_indexes.go @@ -0,0 +1,49 @@ +/* +Copyright © 2020 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dvomigrations + +import ( + "database/sql" + + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +var mig0002CreateDVOReportIndexes = migration.Migration{ + StepUp: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(` + CREATE INDEX report_org_id_idx ON dvo.dvo_report USING HASH (org_id); + CREATE INDEX report_org_id_cluster_id_idx ON dvo.dvo_report (org_id, cluster_id); + `) + if err != nil { + return err + } + + _, err = tx.Exec(` + COMMENT ON INDEX dvo.report_org_id_idx IS 'for Workload page we need to be able to retrieve all records for given organization'; + COMMENT ON INDEX dvo.report_org_id_cluster_id_idx IS 'for Namespace view'; + `) + return err + }, + StepDown: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(` + DROP INDEX dvo.report_org_id_idx; + DROP INDEX dvo.report_org_id_cluster_id_idx; + `) + return err + }, +} diff --git a/migration/dvomigrations/mig_0003_ccxdev_12602_delete_buggy_records.go b/migration/dvomigrations/mig_0003_ccxdev_12602_delete_buggy_records.go new file mode 100644 index 000000000..7ea74409c --- /dev/null +++ b/migration/dvomigrations/mig_0003_ccxdev_12602_delete_buggy_records.go @@ -0,0 +1,34 @@ +/* +Copyright © 2020 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dvomigrations + +import ( + "database/sql" + + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +var mig0003CCXDEV12602DeleteBuggyRecords = migration.Migration{ + StepUp: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(` + DELETE FROM dvo.dvo_report WHERE last_checked_at <= DATE '2024-03-09'; + `) + return err + }, + StepDown: func(_ *sql.Tx, _ types.DBDriver) error { return nil }, +} diff --git a/migration/export_test.go b/migration/export_test.go index 2e0e16120..9319e00eb 100644 --- a/migration/export_test.go +++ b/migration/export_test.go @@ -26,7 +26,5 @@ package migration // https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd // to see why this trick is needed. var ( - Migrations = &migrations - WithTransaction = withTransaction - Mig0004ModifyClusterRuleUserFeedback = mig0004ModifyClusterRuleUserFeedback + WithTransaction = withTransaction ) diff --git a/migration/helpers.go b/migration/helpers.go index d87fc3776..a0b1d00bf 100644 --- a/migration/helpers.go +++ b/migration/helpers.go @@ -23,7 +23,8 @@ import ( ) const ( - driverUnsupportedErr = "%v driver is not supported" + // DriverUnsupportedErr error message indicating unsupported DB driver + DriverUnsupportedErr = "%v driver is not supported" tableTag = "table" ) @@ -33,15 +34,16 @@ const ( func NewUpdateTableMigration(tableName, previousSchema string, previousColumns []string, newSchema string) Migration { return Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { - return upgradeTable(tx, tableName, newSchema) + return UpgradeTable(tx, tableName, newSchema) }, StepDown: func(tx *sql.Tx, _ types.DBDriver) error { - return downgradeTable(tx, tableName, previousSchema, previousColumns) + return DowngradeTable(tx, tableName, previousSchema, previousColumns) }, } } -func upgradeTable(tx *sql.Tx, tableName, newTableDefinition string) error { +// UpgradeTable runs 
ALTER TABLE based on the given definition +func UpgradeTable(tx *sql.Tx, tableName, newTableDefinition string) error { // disable "G202 (CWE-89): SQL string concatenation" // #nosec G202 _, err := tx.Exec(`ALTER TABLE ` + tableName + ` RENAME TO tmp;`) @@ -69,9 +71,9 @@ func upgradeTable(tx *sql.Tx, tableName, newTableDefinition string) error { return nil } -// downgradeTable downgrades table to oldTableDefinition, useful for sqlite which doesn't support -// most of alter table queries. Set columns to the list of new columns if you're removing any columns -func downgradeTable(tx *sql.Tx, tableName, oldTableDefinition string, columns []string) error { +// DowngradeTable downgrades table to oldTableDefinition. +// Set columns to the list of new columns if you're removing any columns +func DowngradeTable(tx *sql.Tx, tableName, oldTableDefinition string, columns []string) error { // disable "G202 (CWE-89): SQL string concatenation" // #nosec G202 _, err := tx.Exec(`ALTER TABLE ` + tableName + ` RENAME TO tmp;`) @@ -104,7 +106,8 @@ func downgradeTable(tx *sql.Tx, tableName, oldTableDefinition string, columns [] return nil } -func updateTableData(tx *sql.Tx, table, query string, args ...interface{}) error { +// UpdateTableData updates data and checks rows affected +func UpdateTableData(tx *sql.Tx, table, query string, args ...interface{}) error { log.Debug().Str(tableTag, table).Msg("Updating rows...") result, err := tx.Exec(query, args...) diff --git a/migration/migration.go b/migration/migration.go index 49fa76448..c618f8f13 100644 --- a/migration/migration.go +++ b/migration/migration.go @@ -29,9 +29,14 @@ import ( "github.com/RedHatInsights/insights-results-aggregator/types" ) +const defaultDBSchema = "public" + // Version represents a version of the database. type Version uint +// Schema represents the used schema of the database. +type Schema string + // Step represents an action performed to either increase // or decrease the migration version of the database. 
type Step func(tx *sql.Tx, driver types.DBDriver) error @@ -42,44 +47,67 @@ type Migration struct { StepDown Step } -const ( - ruleErrorKeyTable = "rule_error_key" - clusterRuleUserFeedbackTable = "cluster_rule_user_feedback" - clusterReportTable = "report" - clusterRuleToggleTable = "cluster_rule_toggle" - clusterUserRuleDisableFeedbackTable = "cluster_user_rule_disable_feedback" - alterTableDropColumnQuery = "ALTER TABLE %v DROP COLUMN IF EXISTS %v" - alterTableAddVarcharColumn = "ALTER TABLE %v ADD COLUMN %v VARCHAR NOT NULL DEFAULT '-1'" - alterTableDropPK = "ALTER TABLE %v DROP CONSTRAINT IF EXISTS %v_pkey" - alterTableAddPK = "ALTER TABLE %v ADD CONSTRAINT %v_pkey PRIMARY KEY %v" - userIDColumn = "user_id" -) +// InitDBSchema ensures that a given database schema exists. +// If it already exists, no changes will be made to the database, +// otherwise it will attempt to create the schema. +func InitDBSchema(db *sql.DB, schema Schema) error { + return withTransaction(db, func(tx *sql.Tx) error { + if schema == "" { + schema = defaultDBSchema + } + + // #nosec G201 + _, err := tx.Exec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %v;", schema)) + if err != nil { + return err + } + + var schemaExists bool + err = tx.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.schemata WHERE schema_name = $1);", schema).Scan(&schemaExists) + if err != nil { + return err + } -// GetMaxVersion returns the highest available migration version. -// The DB version cannot be set to a value higher than this. -// This value is equivalent to the length of the list of available migrations. -func GetMaxVersion() Version { - return Version(len(migrations)) + if !schemaExists { + return fmt.Errorf("unable to create schema %v", schema) + } + + return nil + }) } // InitInfoTable ensures that the migration information table is created. // If it already exists, no changes will be made to the database. // Otherwise, a new migration information table will be created and initialized. 
-func InitInfoTable(db *sql.DB) error { +func InitInfoTable(db *sql.DB, schema Schema) error { return withTransaction(db, func(tx *sql.Tx) error { - _, err := tx.Exec("CREATE TABLE IF NOT EXISTS migration_info (version INTEGER NOT NULL);") + if schema == "" { + schema = defaultDBSchema + } + + // #nosec G201 + _, err := tx.Exec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v.migration_info (version INTEGER NOT NULL);", schema)) if err != nil { return err } // INSERT if there's no rows in the table - _, err = tx.Exec("INSERT INTO migration_info (version) SELECT 0 WHERE NOT EXISTS (SELECT version FROM migration_info);") + // #nosec G201 + _, err = tx.Exec( + fmt.Sprintf( + "INSERT INTO %v.migration_info (version) SELECT 0 WHERE NOT EXISTS (SELECT version FROM %v.migration_info);", + schema, + schema, + ), + ) if err != nil { return err } var rowCount uint - err = tx.QueryRow("SELECT COUNT(*) FROM migration_info;").Scan(&rowCount) + + // #nosec G201 + err = tx.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %v.migration_info;", schema)).Scan(&rowCount) if err != nil { return err } @@ -93,14 +121,21 @@ func InitInfoTable(db *sql.DB) error { } // GetDBVersion reads the current version of the database from the migration info table. 
-func GetDBVersion(db *sql.DB) (Version, error) { - err := validateNumberOfRows(db) +func GetDBVersion(db *sql.DB, schema Schema) (Version, error) { + if schema == "" { + schema = defaultDBSchema + } + + err := validateNumberOfRows(db, schema) if err != nil { return 0, err } + // #nosec G201 + query := fmt.Sprintf("SELECT version FROM %v.migration_info;", schema) + var version Version // version 0 by default - err = db.QueryRow("SELECT version FROM migration_info;").Scan(&version) + err = db.QueryRow(query).Scan(&version) err = types.ConvertDBError(err, nil) return version, err @@ -108,14 +143,24 @@ func GetDBVersion(db *sql.DB) (Version, error) { // SetDBVersion attempts to get the database into the specified // target version using available migration steps. -func SetDBVersion(db *sql.DB, dbDriver types.DBDriver, targetVer Version) error { - maxVer := GetMaxVersion() +func SetDBVersion( + db *sql.DB, + dbDriver types.DBDriver, + dbSchema Schema, + targetVer Version, + migrations []Migration, +) error { + if dbSchema == "" { + dbSchema = defaultDBSchema + } + + maxVer := Version(len(migrations)) if targetVer > maxVer { return fmt.Errorf("invalid target version (available version range is 0-%d)", maxVer) } // Get current database version. - currentVer, err := GetDBVersion(db) + currentVer, err := GetDBVersion(db, dbSchema) if err != nil { return err } @@ -125,13 +170,14 @@ func SetDBVersion(db *sql.DB, dbDriver types.DBDriver, targetVer Version) error return fmt.Errorf("current version (%d) is outside of available migration boundaries", currentVer) } - return execStepsInTx(db, dbDriver, currentVer, targetVer) + return execStepsInTx(db, dbDriver, dbSchema, currentVer, targetVer, migrations) } // updateVersionInDB updates the migration version number in the migration info table. // This function does NOT rollback in case of an error. The calling function is expected to do that. 
-func updateVersionInDB(tx *sql.Tx, newVersion Version) error { - res, err := tx.Exec("UPDATE migration_info SET version=$1;", newVersion) +func updateVersionInDB(tx *sql.Tx, schema Schema, newVersion Version) error { + // #nosec G201 + res, err := tx.Exec(fmt.Sprintf("UPDATE %v.migration_info SET version=$1;", schema), newVersion) if err != nil { return err } @@ -150,7 +196,14 @@ func updateVersionInDB(tx *sql.Tx, newVersion Version) error { } // execStepsInTx executes the necessary migration steps in a single transaction. -func execStepsInTx(db *sql.DB, dbDriver types.DBDriver, currentVer, targetVer Version) error { +func execStepsInTx( + db *sql.DB, + dbDriver types.DBDriver, + dbSchema Schema, + currentVer, + targetVer Version, + migrations []Migration, +) error { // Already at target version. if currentVer == targetVer { return nil @@ -175,12 +228,12 @@ func execStepsInTx(db *sql.DB, dbDriver types.DBDriver, currentVer, targetVer Ve currentVer-- } - return updateVersionInDB(tx, currentVer) + return updateVersionInDB(tx, dbSchema, currentVer) }) } -func validateNumberOfRows(db *sql.DB) error { - numberOfRows, err := getNumberOfRows(db) +func validateNumberOfRows(db *sql.DB, schema Schema) error { + numberOfRows, err := getNumberOfRows(db, schema) if err != nil { return err } @@ -191,9 +244,10 @@ func validateNumberOfRows(db *sql.DB) error { return nil } -func getNumberOfRows(db *sql.DB) (uint, error) { +func getNumberOfRows(db *sql.DB, schema Schema) (uint, error) { var count uint - err := db.QueryRow("SELECT COUNT(*) FROM migration_info;").Scan(&count) + // #nosec G201 + err := db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %v.migration_info;", schema)).Scan(&count) err = types.ConvertDBError(err, nil) return count, err } diff --git a/migration/migration_test.go b/migration/migration_test.go index 6abf2e983..4d9195227 100644 --- a/migration/migration_test.go +++ b/migration/migration_test.go @@ -24,19 +24,17 @@ import ( "github.com/DATA-DOG/go-sqlmock" 
"github.com/RedHatInsights/insights-operator-utils/tests/helpers" - _ "github.com/mattn/go-sqlite3" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/RedHatInsights/insights-results-aggregator/migration" - "github.com/RedHatInsights/insights-results-aggregator/storage" ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator/types" ) const ( dbClosedErrorMsg = "sql: database is closed" - noSuchTableErrorMsg = "no such table: migration_info" + noSuchTableErrorMsg = "no such table: public.migration_info" stepErrorMsg = "migration Step Error" ) @@ -60,109 +58,119 @@ var ( return err }, } + testMigrations = []migration.Migration{testMigration} ) func init() { zerolog.SetGlobalLevel(zerolog.WarnLevel) } -func prepareDB(t *testing.T) (*sql.DB, types.DBDriver, func()) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) - dbStorage := mockStorage.(*storage.DBStorage) +// TestMigrationInit checks that database migration table initialization succeeds. 
+func TestMigrationInit(t *testing.T) { + db, closer := ira_helpers.PrepareDB(t) + defer closer() + + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + err := migration.InitInfoTable(dbConn, dbSchema) + helpers.FailOnError(t, err) - return dbStorage.GetConnection(), dbStorage.GetDBDriverType(), closer + _, err = migration.GetDBVersion(dbConn, dbSchema) + helpers.FailOnError(t, err) } -func prepareDBAndInfo(t *testing.T) (*sql.DB, types.DBDriver, func()) { - db, dbDriver, closer := prepareDB(t) +func TestMigrationInitDBSchema(t *testing.T) { + db, closer := ira_helpers.PrepareDB(t) + defer closer() - if err := migration.InitInfoTable(db); err != nil { - closer() - t.Fatal(err) - } + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + err := migration.InitDBSchema(dbConn, dbSchema) + helpers.FailOnError(t, err) - return db, dbDriver, closer -} + err = migration.InitInfoTable(dbConn, dbSchema) + helpers.FailOnError(t, err) -func prepareDBAndMigrations(t *testing.T) (*sql.DB, types.DBDriver, func()) { - *migration.Migrations = []migration.Migration{testMigration} - return prepareDBAndInfo(t) + _, err = migration.GetDBVersion(dbConn, dbSchema) + helpers.FailOnError(t, err) } -// TestMigrationFull tests majority of the migration -// mechanism's functionality, all in one place. -func TestMigrationFull(t *testing.T) { - // Don't overwrite the migration list, use the real migrations. 
- db, dbDriver, closer := prepareDBAndInfo(t) +func TestMigrationInitDBSchemaMultipleTimes(t *testing.T) { + db, closer := ira_helpers.PrepareDB(t) defer closer() - maxVer := migration.GetMaxVersion() - assert.NotEqual(t, 0, maxVer, "no migrations available") - - currentVer, err := migration.GetDBVersion(db) + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + err := migration.InitDBSchema(dbConn, dbSchema) helpers.FailOnError(t, err) - assert.Equal(t, migration.Version(0), currentVer, "unexpected version") - - stepUpAndDown(t, db, dbDriver, maxVer, 0) -} - -func stepUpAndDown(t *testing.T, db *sql.DB, dbDriver types.DBDriver, upVer, downVer migration.Version) { - err := migration.SetDBVersion(db, dbDriver, upVer) + err = migration.InitInfoTable(dbConn, dbSchema) helpers.FailOnError(t, err) - currentVer, err := migration.GetDBVersion(db) + _, err = migration.GetDBVersion(dbConn, dbSchema) helpers.FailOnError(t, err) - assert.Equal(t, upVer, currentVer, "unexpected version") - err = migration.SetDBVersion(db, dbDriver, 0) + // running again must be idempotent + err = migration.InitDBSchema(dbConn, dbSchema) helpers.FailOnError(t, err) - currentVer, err = migration.GetDBVersion(db) + _, err = migration.GetDBVersion(dbConn, dbSchema) helpers.FailOnError(t, err) - assert.Equal(t, downVer, currentVer, "unexpected version") } -// TestMigrationInit checks that database migration table initialization succeeds. 
-func TestMigrationInit(t *testing.T) { - db, _, closer := prepareDB(t) +// TestMigrationInitDBSchemaEmptySchema must work with empty schema (uses default "public") +func TestMigrationInitDBSchemaEmptySchema(t *testing.T) { + db, closer := ira_helpers.PrepareDB(t) defer closer() - err := migration.InitInfoTable(db) + dbConn := db.GetConnection() + err := migration.InitDBSchema(dbConn, "") + helpers.FailOnError(t, err) + + err = migration.InitInfoTable(dbConn, "") helpers.FailOnError(t, err) - _, err = migration.GetDBVersion(db) + _, err = migration.GetDBVersion(dbConn, "") helpers.FailOnError(t, err) } +func TestMigrationInitDBSchemaWrongSchema(t *testing.T) { + db, closer := ira_helpers.PrepareDB(t) + defer closer() + + dbConn := db.GetConnection() + err := migration.InitDBSchema(dbConn, "-1") + assert.Error(t, err) +} + // TestMigrationReInit checks that an attempt to re-initialize an already initialized // migration info table will simply result in a no-op without any error. func TestMigrationReInit(t *testing.T) { - db, _, closer := prepareDBAndMigrations(t) + dbConn, _, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(dbConn, dbSchema) helpers.FailOnError(t, err) } func TestMigrationInitNotOneRow(t *testing.T) { - db, _, closer := prepareDBAndMigrations(t) + dbConn, _, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec("INSERT INTO migration_info(version) VALUES(10);") + _, err := dbConn.Exec("INSERT INTO public.migration_info(version) VALUES(10);") helpers.FailOnError(t, err) const expectedErrStr = "unexpected number of rows in migration info table (expected: 1, reality: 2)" - err = migration.InitInfoTable(db) + err = migration.InitInfoTable(dbConn, dbSchema) assert.EqualError(t, err, expectedErrStr) } // TestMigrationGetVersion checks that the initial database migration version is 0. 
func TestMigrationGetVersion(t *testing.T) { - db, _, closer := prepareDBAndMigrations(t) + dbConn, _, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - version, err := migration.GetDBVersion(db) + version, err := migration.GetDBVersion(dbConn, dbSchema) helpers.FailOnError(t, err) assert.Equal(t, migration.Version(0), version, "unexpected database version") @@ -170,137 +178,139 @@ func TestMigrationGetVersion(t *testing.T) { func TestMigrationGetVersionMissingInfoTable(t *testing.T) { // Prepare DB without preparing the migration info table. - db, _, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) defer closer() - _, err := migration.GetDBVersion(db) + _, err := migration.GetDBVersion(db.GetConnection(), db.GetDBSchema()) assert.EqualError(t, err, noSuchTableErrorMsg) } func TestMigrationGetVersionMultipleRows(t *testing.T) { - db, _, closer := prepareDBAndMigrations(t) + dbConn, _, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec("INSERT INTO migration_info(version) VALUES(10);") + _, err := dbConn.Exec("INSERT INTO public.migration_info(version) VALUES(10);") helpers.FailOnError(t, err) - _, err = migration.GetDBVersion(db) + _, err = migration.GetDBVersion(dbConn, dbSchema) assert.EqualError(t, err, "migration info table contain 2 rows") } func TestMigrationGetVersionEmptyTable(t *testing.T) { - db, _, closer := prepareDBAndMigrations(t) + dbConn, _, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec("DELETE FROM migration_info;") + _, err := dbConn.Exec("DELETE FROM migration_info;") helpers.FailOnError(t, err) - _, err = migration.GetDBVersion(db) + _, err = migration.GetDBVersion(dbConn, dbSchema) assert.EqualError(t, err, "migration info table contain 0 rows") } func TestMigrationGetVersionInvalidType(t *testing.T) { - db, _, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) defer closer() - _, err := db.Exec("CREATE TABLE 
migration_info ( version TEXT );") + dbConn := db.GetConnection() + + _, err := dbConn.Exec("CREATE TABLE public.migration_info ( version TEXT );") helpers.FailOnError(t, err) - _, err = db.Exec("INSERT INTO migration_info(version) VALUES('hello world');") + _, err = dbConn.Exec("INSERT INTO public.migration_info(version) VALUES('hello world');") helpers.FailOnError(t, err) const expectedErrStr = `sql: Scan error on column index 0, name "version": ` + `converting driver.Value type string ("hello world") to a uint: invalid syntax` - _, err = migration.GetDBVersion(db) + _, err = migration.GetDBVersion(dbConn, db.GetDBSchema()) assert.EqualError(t, err, expectedErrStr) } // TestMigrationSetVersion checks that it is possible to change // the database version in both direction (upgrade and downgrade). func TestMigrationSetVersion(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // Step-up from 0 to 1. - err := migration.SetDBVersion(db, dbDriver, 1) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, testMigrations) helpers.FailOnError(t, err) - version, err := migration.GetDBVersion(db) + version, err := migration.GetDBVersion(dbConn, dbSchema) helpers.FailOnError(t, err) assert.Equal(t, migration.Version(1), version, "unexpected database version") // Step-down from 1 to 0. - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, testMigrations) helpers.FailOnError(t, err) - version, err = migration.GetDBVersion(db) + version, err = migration.GetDBVersion(dbConn, dbSchema) helpers.FailOnError(t, err) assert.Equal(t, migration.Version(0), version, "unexpected database version") } func TestMigrationNoInfoTable(t *testing.T) { - db, _, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) defer closer() // Intentionally missing info table initialization here. 
- _, err := migration.GetDBVersion(db) + _, err := migration.GetDBVersion(db.GetConnection(), db.GetDBSchema()) assert.EqualError( t, err, noSuchTableErrorMsg, "migration info table should be missing when not initialized", ) } func TestMigrationSetVersionSame(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // Step-up from 0 to 1. - err := migration.SetDBVersion(db, dbDriver, 1) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, testMigrations) helpers.FailOnError(t, err) // Set version to. - err = migration.SetDBVersion(db, dbDriver, 1) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, testMigrations) helpers.FailOnError(t, err) - version, err := migration.GetDBVersion(db) + version, err := migration.GetDBVersion(dbConn, dbSchema) helpers.FailOnError(t, err) assert.Equal(t, migration.Version(1), version, "unexpected database version") } func TestMigrationSetVersionTargetTooHigh(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // Step-up from 0 to 2 (impossible -- only 1 migration is available). - err := migration.SetDBVersion(db, dbDriver, 2) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 2, testMigrations) assert.EqualError(t, err, "invalid target version (available version range is 0-1)") } // TestMigrationSetVersionUpError checks that an error during a step-up is correctly handled. 
func TestMigrationSetVersionUpError(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - *migration.Migrations = []migration.Migration{ + tMigrations := []migration.Migration{ { StepUp: stepErrorFn, StepDown: stepNoopFn, }, } - err := migration.SetDBVersion(db, dbDriver, 1) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, tMigrations) assert.EqualError(t, err, stepErrorMsg) } // TestMigrationSetVersionDownError checks that an error during a step-down is correctly handled. func TestMigrationSetVersionDownError(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - *migration.Migrations = []migration.Migration{ + tMigrations := []migration.Migration{ { StepUp: stepNoopFn, StepDown: stepErrorFn, @@ -308,72 +318,74 @@ func TestMigrationSetVersionDownError(t *testing.T) { } // First we need to step-up before we can step-down. - err := migration.SetDBVersion(db, dbDriver, 1) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, tMigrations) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, tMigrations) assert.EqualError(t, err, stepErrorMsg) } // TestMigrationSetVersionCurrentTooHighError makes sure that if the current DB version // is outside of the available migration range, it is reported as an error. 
func TestMigrationSetVersionCurrentTooHighError(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec("UPDATE migration_info SET version=10;") + _, err := dbConn.Exec("UPDATE public.migration_info SET version=10;") helpers.FailOnError(t, err) const expectedErrStr = "current version (10) is outside of available migration boundaries" - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, testMigrations) assert.EqualError(t, err, expectedErrStr) } func TestMigrationInitClosedDB(t *testing.T) { - db, _, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) // Intentionally no `defer` here. closer() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(db.GetConnection(), db.GetDBSchema()) assert.EqualError(t, err, dbClosedErrorMsg) } func TestMigrationGetVersionClosedDB(t *testing.T) { - db, _, closer := prepareDBAndMigrations(t) + dbConn, _, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) // Intentionally no `defer` here. closer() - _, err := migration.GetDBVersion(db) + _, err := migration.GetDBVersion(dbConn, dbSchema) assert.EqualError(t, err, dbClosedErrorMsg) } func TestMigrationSetVersionClosedDB(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) // Intentionally no `defer` here. 
closer() - err := migration.SetDBVersion(db, dbDriver, 0) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, testMigrations) assert.EqualError(t, err, dbClosedErrorMsg) } func TestMigrationInitRollbackStep(t *testing.T) { - db, dbDriver, closer := prepareDBAndMigrations(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - *migration.Migrations = []migration.Migration{{ - StepUp: stepRollbackFn, - StepDown: stepNoopFn, - }} + tMigrations := []migration.Migration{ + { + StepUp: stepRollbackFn, + StepDown: stepNoopFn, + }, + } const expectedErrStr = "sql: transaction has already been committed or rolled back" - err := migration.SetDBVersion(db, dbDriver, 1) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, tMigrations) assert.EqualError(t, err, expectedErrStr) } func TestInitInfoTable_BeginTransactionDBError(t *testing.T) { - db, _, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) closer() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(db.GetConnection(), db.GetDBSchema()) assert.EqualError(t, err, "sql: database is closed") } @@ -384,10 +396,10 @@ func TestInitInfoTable_InitTableDBError(t *testing.T) { defer ira_helpers.MustCloseMockDBWithExpects(t, db, expects) expects.ExpectBegin() - expects.ExpectExec("CREATE TABLE IF NOT EXISTS migration_info").WillReturnError(fmt.Errorf(errStr)) + expects.ExpectExec("CREATE TABLE IF NOT EXISTS public.migration_info").WillReturnError(fmt.Errorf(errStr)) expects.ExpectRollback() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(db, "") assert.EqualError(t, err, errStr) } @@ -398,11 +410,11 @@ func TestInitInfoTable_InitVersionDBError(t *testing.T) { defer ira_helpers.MustCloseMockDBWithExpects(t, db, expects) expects.ExpectBegin() - expects.ExpectExec("CREATE TABLE IF NOT EXISTS migration_info").WillReturnResult(sql_driver.ResultNoRows) - expects.ExpectExec("INSERT INTO 
migration_info").WillReturnError(fmt.Errorf(errStr)) + expects.ExpectExec("CREATE TABLE IF NOT EXISTS public.migration_info").WillReturnResult(sql_driver.ResultNoRows) + expects.ExpectExec("INSERT INTO public.migration_info").WillReturnError(fmt.Errorf(errStr)) expects.ExpectRollback() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(db, "") assert.EqualError(t, err, errStr) } @@ -413,36 +425,33 @@ func TestInitInfoTable_CountDBError(t *testing.T) { defer ira_helpers.MustCloseMockDBWithExpects(t, db, expects) expects.ExpectBegin() - expects.ExpectExec("CREATE TABLE IF NOT EXISTS migration_info").WillReturnResult(sql_driver.ResultNoRows) - expects.ExpectExec("INSERT INTO migration_info").WillReturnResult(sql_driver.ResultNoRows) - expects.ExpectQuery("SELECT COUNT.+FROM migration_info").WillReturnError(fmt.Errorf(errStr)) + expects.ExpectExec("CREATE TABLE IF NOT EXISTS public.migration_info").WillReturnResult(sql_driver.ResultNoRows) + expects.ExpectExec("INSERT INTO public.migration_info").WillReturnResult(sql_driver.ResultNoRows) + expects.ExpectQuery("SELECT COUNT.+FROM public.migration_info").WillReturnError(fmt.Errorf(errStr)) expects.ExpectRollback() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(db, "") assert.EqualError(t, err, errStr) } func updateVersionInDBCommon(t *testing.T) (*sql.DB, sqlmock.Sqlmock) { - // set test migrations - *migration.Migrations = []migration.Migration{testMigration} - db, expects := ira_helpers.MustGetMockDBWithExpects(t) expects.ExpectBegin() - expects.ExpectExec("CREATE TABLE IF NOT EXISTS migration_info").WillReturnResult(sql_driver.ResultNoRows) - expects.ExpectExec("INSERT INTO migration_info").WillReturnResult(sql_driver.ResultNoRows) - expects.ExpectQuery("SELECT COUNT.+FROM migration_info").WillReturnRows( + expects.ExpectExec("CREATE TABLE IF NOT EXISTS public.migration_info").WillReturnResult(sql_driver.ResultNoRows) + expects.ExpectExec("INSERT INTO 
public.migration_info").WillReturnResult(sql_driver.ResultNoRows) + expects.ExpectQuery("SELECT COUNT.+FROM public.migration_info").WillReturnRows( sqlmock.NewRows([]string{"version"}).AddRow(1), ) expects.ExpectCommit() - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(db, "") helpers.FailOnError(t, err) - expects.ExpectQuery("SELECT COUNT.+FROM migration_info").WillReturnRows( + expects.ExpectQuery("SELECT COUNT.+FROM public.migration_info").WillReturnRows( sqlmock.NewRows([]string{"version"}).AddRow(1), ) - expects.ExpectQuery("SELECT version FROM migration_info").WillReturnRows( + expects.ExpectQuery("SELECT version FROM public.migration_info").WillReturnRows( sqlmock.NewRows([]string{"version"}).AddRow(0), ) expects.ExpectBegin() @@ -457,11 +466,11 @@ func TestUpdateVersionInDB_RowsAffectedError(t *testing.T) { db, expects := updateVersionInDBCommon(t) defer ira_helpers.MustCloseMockDBWithExpects(t, db, expects) - expects.ExpectExec("UPDATE migration_info SET version"). + expects.ExpectExec("UPDATE public.migration_info SET version"). WithArgs(1). WillReturnResult(sqlmock.NewErrorResult(fmt.Errorf(errStr))) - err := migration.SetDBVersion(db, types.DBDriverGeneral, migration.GetMaxVersion()) + err := migration.SetDBVersion(db, types.DBDriverGeneral, "", 1, testMigrations) assert.EqualError(t, err, errStr) } @@ -469,14 +478,11 @@ func TestUpdateVersionInDB_MoreThan1RowAffected(t *testing.T) { db, expects := updateVersionInDBCommon(t) defer ira_helpers.MustCloseMockDBWithExpects(t, db, expects) - expects.ExpectExec("UPDATE migration_info SET version"). + expects.ExpectExec("UPDATE public.migration_info SET version"). WithArgs(1). 
WillReturnResult(sqlmock.NewResult(1, 2)) - // set test migrations - *migration.Migrations = []migration.Migration{testMigration} - - err := migration.SetDBVersion(db, types.DBDriverGeneral, migration.GetMaxVersion()) + err := migration.SetDBVersion(db, types.DBDriverGeneral, "", 1, testMigrations) assert.EqualError( t, err, "unexpected number of affected rows in migration info table (expected: 1, reality: 2)", ) diff --git a/migration/actual_migrations_test.go b/migration/ocpmigrations/actual_migrations_test.go similarity index 68% rename from migration/actual_migrations_test.go rename to migration/ocpmigrations/actual_migrations_test.go index f5fc0b28a..29908ed61 100644 --- a/migration/actual_migrations_test.go +++ b/migration/ocpmigrations/actual_migrations_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package migration_test +package ocpmigrations_test import ( "database/sql" @@ -27,11 +27,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/migration/ocpmigrations" "github.com/RedHatInsights/insights-results-aggregator/storage" ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator/types" ) +func getMaxVersion() migration.Version { + return migration.Version(len(ocpmigrations.UsableOCPMigrations)) +} + func GetTxForMigration(t *testing.T) (*sql.Tx, *sql.DB, sqlmock.Sqlmock) { db, expects := ira_helpers.MustGetMockDBWithExpects(t) @@ -44,180 +49,181 @@ func GetTxForMigration(t *testing.T) (*sql.Tx, *sql.DB, sqlmock.Sqlmock) { } func TestAllMigrations(t *testing.T) { - db, dbDriver, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) defer closer() - err := migration.InitInfoTable(db) + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + + err := 
migration.InitInfoTable(dbConn, dbSchema) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) } func TestMigrationsOneByOne(t *testing.T) { - db, dbDriver, closer := prepareDB(t) + db, closer := ira_helpers.PrepareDB(t) defer closer() - allMigrations := make([]migration.Migration, len(*migration.Migrations)) - copy(allMigrations, *migration.Migrations) - *migration.Migrations = []migration.Migration{} + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + + allMigrations := make([]migration.Migration, len(ocpmigrations.UsableOCPMigrations)) + copy(allMigrations, ocpmigrations.UsableOCPMigrations) + testMigrations := []migration.Migration{} for i := 0; i < len(allMigrations); i++ { // add one migration to the list - *migration.Migrations = append(*migration.Migrations, allMigrations[i]) + testMigrations = append(testMigrations, allMigrations[i]) - err := migration.InitInfoTable(db) + err := migration.InitInfoTable(dbConn, dbSchema) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, migration.Version(i), testMigrations) helpers.FailOnError(t, err) } } func TestMigration1_TableReportAlreadyExists(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec(`CREATE TABLE report(c INTEGER);`) + _, err := dbConn.Exec(`CREATE TABLE report(c INTEGER);`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "table report already exists") } func 
TestMigration1_TableReportDoesNotExist(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // set to the version with the report table - err := migration.SetDBVersion(db, dbDriver, 1) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 1, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(`DROP TABLE report;`) + _, err = dbConn.Exec(`DROP TABLE report;`) helpers.FailOnError(t, err) // try to set to the first version - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: report") } func TestMigration2_TableRuleAlreadyExists(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec(`CREATE TABLE rule(c INTEGER);`) + _, err := dbConn.Exec(`CREATE TABLE rule(c INTEGER);`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "table rule already exists") } func TestMigration2_TableRuleDoesNotExist(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // set to the version where table rule exists - err := migration.SetDBVersion(db, dbDriver, 2) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 2, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - if dbDriver == types.DBDriverSQLite3 { - _, err = db.Exec(`DROP TABLE rule;`) - helpers.FailOnError(t, err) - } else { - _, err = db.Exec(`DROP TABLE rule CASCADE;`) - helpers.FailOnError(t, err) - } + _, err = dbConn.Exec(`DROP TABLE rule CASCADE;`) + 
helpers.FailOnError(t, err) // try to set to the first version - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: rule") } func TestMigration2_TableRuleErrorKeyAlreadyExists(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec(`CREATE TABLE rule_error_key(c INTEGER);`) + _, err := dbConn.Exec(`CREATE TABLE rule_error_key(c INTEGER);`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "table rule_error_key already exists") } func TestMigration2_TableRuleErrorKeyDoesNotExist(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // set to the latest version - err := migration.SetDBVersion(db, dbDriver, 2) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 2, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(`DROP TABLE rule_error_key;`) + _, err = dbConn.Exec(`DROP TABLE rule_error_key;`) helpers.FailOnError(t, err) // try to set to the first version - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: rule_error_key") } func TestMigration3_TableClusterRuleUserFeedbackAlreadyExists(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec(`CREATE TABLE cluster_rule_user_feedback(c INTEGER);`) + _, err := dbConn.Exec(`CREATE TABLE cluster_rule_user_feedback(c 
INTEGER);`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "table cluster_rule_user_feedback already exists") } func TestMigration3_TableClusterRuleUserFeedbackDoesNotExist(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() // set to the latest version - err := migration.SetDBVersion(db, dbDriver, 3) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 3, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(`DROP TABLE cluster_rule_user_feedback;`) + _, err = dbConn.Exec(`DROP TABLE cluster_rule_user_feedback;`) helpers.FailOnError(t, err) // try to set to the first version - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: cluster_rule_user_feedback") } func TestMigration4_StepUp_TableClusterRuleUserFeedbackDoesNotExist(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - err := migration.SetDBVersion(db, dbDriver, 3) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 3, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(`DROP TABLE cluster_rule_user_feedback;`) + _, err = dbConn.Exec(`DROP TABLE cluster_rule_user_feedback;`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: cluster_rule_user_feedback") } func TestMigration4_StepDown_TableClusterRuleUserFeedbackDoesNotExist(t 
*testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - err := migration.SetDBVersion(db, dbDriver, 4) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 4, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(`DROP TABLE cluster_rule_user_feedback;`) + _, err = dbConn.Exec(`DROP TABLE cluster_rule_user_feedback;`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: cluster_rule_user_feedback") } func TestMigration4_CreateTableError(t *testing.T) { expectedErr := fmt.Errorf("create table error") - mig4 := migration.Mig0004ModifyClusterRuleUserFeedback + mig4 := ocpmigrations.Mig0004ModifyClusterRuleUserFeedback for _, method := range []func(*sql.Tx, types.DBDriver) error{mig4.StepUp, mig4.StepDown} { func(method func(*sql.Tx, types.DBDriver) error) { @@ -237,7 +243,7 @@ func TestMigration4_CreateTableError(t *testing.T) { func TestMigration4_InsertError(t *testing.T) { expectedErr := fmt.Errorf("insert error") - mig4 := migration.Mig0004ModifyClusterRuleUserFeedback + mig4 := ocpmigrations.Mig0004ModifyClusterRuleUserFeedback for _, method := range []func(*sql.Tx, types.DBDriver) error{mig4.StepUp, mig4.StepDown} { func(method func(*sql.Tx, types.DBDriver) error) { @@ -259,7 +265,7 @@ func TestMigration4_InsertError(t *testing.T) { func TestMigration4_DropTableError(t *testing.T) { expectedErr := fmt.Errorf("drop table error") - mig4 := migration.Mig0004ModifyClusterRuleUserFeedback + mig4 := ocpmigrations.Mig0004ModifyClusterRuleUserFeedback for _, method := range []func(*sql.Tx, types.DBDriver) error{mig4.StepUp, mig4.StepDown} { func(method func(*sql.Tx, types.DBDriver) error) { @@ -282,43 +288,38 @@ func TestMigration4_DropTableError(t *testing.T) { } func 
TestMigration5_TableAlreadyExists(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - _, err := db.Exec("CREATE TABLE consumer_error(c INTEGER);") + _, err := dbConn.Exec("CREATE TABLE consumer_error(c INTEGER);") helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, migration.GetMaxVersion()) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, getMaxVersion(), ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "table consumer_error already exists") } func TestMigration5_NoSuchTable(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - err := migration.SetDBVersion(db, dbDriver, 5) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 5, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(`DROP TABLE consumer_error`) + _, err = dbConn.Exec(`DROP TABLE consumer_error`) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 0) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 0, ocpmigrations.UsableOCPMigrations) assert.EqualError(t, err, "no such table: consumer_error") } func TestMigration13(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // migration is not implemented for sqlite - return - } - - err := migration.SetDBVersion(db, dbDriver, 12) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 12, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO report (org_id, cluster, report, reported_at, last_checked_at) VALUES ($1, $2, $3, $4, $5) `, @@ -330,7 +331,7 @@ func TestMigration13(t *testing.T) { ) helpers.FailOnError(t, err) - err = 
migration.SetDBVersion(db, dbDriver, 13) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 13, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) assertRule := func(ruleFQDN types.RuleID, errorKey types.ErrorKey, expectedTemplateData string) { @@ -338,7 +339,7 @@ func TestMigration13(t *testing.T) { templateData string ) - err := db.QueryRow(` + err := dbConn.QueryRow(` SELECT template_data FROM @@ -369,13 +370,13 @@ func TestMigration13(t *testing.T) { } func TestMigration16(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - err := migration.SetDBVersion(db, dbDriver, 15) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 15, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key) VALUES ($1, $2, $3, $4) `, @@ -386,16 +387,12 @@ func TestMigration16(t *testing.T) { ) assert.Error(t, err, `Expected error since recommendation table does not exist yet`) - if dbDriver == types.DBDriverSQLite3 { - assert.Contains(t, err.Error(), "no such table: recommendation") - } else if dbDriver == types.DBDriverPostgres { - assert.Contains(t, err.Error(), `relation "recommendation" does not exist`) - } + assert.Contains(t, err.Error(), `relation "recommendation" does not exist`) - err = migration.SetDBVersion(db, dbDriver, 16) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 16, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key) VALUES ($1, $2, $3, $4) `, @@ -408,20 +405,15 @@ func TestMigration16(t *testing.T) { } func TestMigration19(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == 
types.DBDriverSQLite3 { - // nothing worth testing for sqlite - return - } - - err := migration.SetDBVersion(db, dbDriver, 18) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 18, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT created_at FROM recommendation`).Err() + err = dbConn.QueryRow(`SELECT created_at FROM recommendation`).Err() assert.Error(t, err, "created_at column should not exist") - err = db.QueryRow(`SELECT rule_id FROM recommendation`).Err() + err = dbConn.QueryRow(`SELECT rule_id FROM recommendation`).Err() assert.Error(t, err, "rule_id column should not exist") correctRuleID := testdata.Rule1ID + "|" + testdata.ErrorKey1 @@ -429,7 +421,7 @@ func TestMigration19(t *testing.T) { expectedRuleAfterMigration := string(testdata.Rule1ID) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key) VALUES ($1, $2, $3, $4) `, @@ -440,7 +432,7 @@ func TestMigration19(t *testing.T) { ) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key) VALUES ($1, $2, $3, $4) `, @@ -451,7 +443,7 @@ func TestMigration19(t *testing.T) { ) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 19) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 19, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var ( @@ -459,7 +451,7 @@ func TestMigration19(t *testing.T) { ruleID string ) - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT rule_fqdn, rule_id FROM @@ -474,7 +466,7 @@ func TestMigration19(t *testing.T) { assert.Equal(t, expectedRuleAfterMigration, ruleFQDN) assert.Equal(t, string(correctRuleID), ruleID) - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT rule_fqdn, rule_id FROM @@ -490,7 +482,7 @@ func TestMigration19(t *testing.T) { assert.Equal(t, string(correctRuleID), ruleID) var timestamp time.Time - err = db.QueryRow(` + err = 
dbConn.QueryRow(` SELECT created_at FROM @@ -506,23 +498,23 @@ func TestMigration19(t *testing.T) { assert.True(t, timestamp.UTC().Equal(timestamp), "The stored timestamp is not in UTC format") // Step down should remove created_at and rule_id columns - err = migration.SetDBVersion(db, dbDriver, 18) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 18, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT created_at FROM recommendation`).Err() + err = dbConn.QueryRow(`SELECT created_at FROM recommendation`).Err() assert.Error(t, err, "created_at column should not exist") - err = db.QueryRow(`SELECT rule_id FROM recommendation`).Err() + err = dbConn.QueryRow(`SELECT rule_id FROM recommendation`).Err() assert.Error(t, err, "rule_id column should not exist") } func TestMigration18(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - err := migration.SetDBVersion(db, dbDriver, 17) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 17, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO advisor_ratings (user_id, org_id, rule_id, error_key, rated_at, last_updated_at, rating) VALUES @@ -538,16 +530,12 @@ func TestMigration18(t *testing.T) { ) assert.Error(t, err, `Expected error since advisor_ratings table does not exist yet`) - if dbDriver == types.DBDriverSQLite3 { - assert.Contains(t, err.Error(), "no such table: advisor_ratings") - } else if dbDriver == types.DBDriverPostgres { - assert.Contains(t, err.Error(), `relation "advisor_ratings" does not exist`) - } + assert.Contains(t, err.Error(), `relation "advisor_ratings" does not exist`) - err = migration.SetDBVersion(db, dbDriver, 18) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 18, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = 
dbConn.Exec(` INSERT INTO advisor_ratings (user_id, org_id, rule_id, error_key, rated_at, last_updated_at, rating) VALUES @@ -565,21 +553,16 @@ func TestMigration18(t *testing.T) { } func TestMigration20(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // nothing worth testing for sqlite - return - } - - err := migration.SetDBVersion(db, dbDriver, 19) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 19, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT rule_fqdn FROM advisor_ratings`).Err() + err = dbConn.QueryRow(`SELECT rule_fqdn FROM advisor_ratings`).Err() assert.Error(t, err, "rule_fqdn column should not exist") - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO advisor_ratings (user_id, org_id, rule_id, error_key, rated_at, last_updated_at, rating) VALUES @@ -595,7 +578,7 @@ func TestMigration20(t *testing.T) { ) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 20) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 20, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var ( @@ -603,7 +586,7 @@ func TestMigration20(t *testing.T) { ruleID string ) - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT rule_fqdn, rule_id FROM @@ -619,12 +602,12 @@ func TestMigration20(t *testing.T) { assert.Equal(t, testdata.Rule1CompositeID, types.RuleID(ruleID)) // Step down should rename rule_fqdn column to rule_id and it contains only plugin name - err = migration.SetDBVersion(db, dbDriver, 19) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 19, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT rule_fqdn FROM advisor_ratings`).Err() + err = dbConn.QueryRow(`SELECT rule_fqdn FROM advisor_ratings`).Err() assert.Error(t, err, "rule_fqdn column should not exist") - err = db.QueryRow(` 
+ err = dbConn.QueryRow(` SELECT rule_id FROM @@ -640,15 +623,10 @@ func TestMigration20(t *testing.T) { } func TestMigration22(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // sqlite is no longer supported - return - } - - err := migration.SetDBVersion(db, dbDriver, 21) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 21, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var expectedCorrectCount int @@ -665,7 +643,7 @@ func TestMigration22(t *testing.T) { } // insert toggles - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO cluster_rule_toggle (cluster_id, rule_id, error_key, user_id, disabled, updated_at) VALUES @@ -681,7 +659,7 @@ func TestMigration22(t *testing.T) { helpers.FailOnError(t, err) // insert disable feedbacks - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO cluster_user_rule_disable_feedback (cluster_id, rule_id, error_key, user_id, message, added_at, updated_at) VALUES @@ -698,13 +676,13 @@ func TestMigration22(t *testing.T) { } // migrate to 22 - err = migration.SetDBVersion(db, dbDriver, 22) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 22, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) // retrieve numbers of rows var togglesAfterMigrationCount, feedbacksAfterMigrationCount int - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT count(*) FROM @@ -716,7 +694,7 @@ func TestMigration22(t *testing.T) { // must match with expected count assert.Equal(t, expectedCorrectCount, togglesAfterMigrationCount) - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT count(*) FROM @@ -732,88 +710,73 @@ func TestMigration22(t *testing.T) { } func TestMigration24(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { 
- // sqlite is no longer supported - return - } - - err := migration.SetDBVersion(db, dbDriver, 23) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 23, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT created_at FROM rule_hit`, ) assert.NotNil(t, err) // migrate to 24 - err = migration.SetDBVersion(db, dbDriver, 24) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 24, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT created_at FROM rule_hit`, ) helpers.FailOnError(t, err) } func TestMigration25(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // sqlite is no longer supported - return - } - - err := migration.SetDBVersion(db, dbDriver, 24) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 24, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT impacted_since FROM recommendation`, ) assert.Error(t, err) // migrate to 25 - err = migration.SetDBVersion(db, dbDriver, 25) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 25, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT impacted_since FROM recommendation`, ) helpers.FailOnError(t, err) } func TestMigration26(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // sqlite is no longer supported - return - } - - err := migration.SetDBVersion(db, dbDriver, 25) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 25, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT org_id FROM 
cluster_rule_toggle`, ) assert.Error(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT org_id FROM cluster_rule_user_feedback`, ) assert.Error(t, err) - _, err = db.Exec( + _, err = dbConn.Exec( `SELECT org_id FROM cluster_user_rule_disable_feedback`, ) assert.Error(t, err) // insert into report table - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO report (org_id, cluster, report, reported_at, last_checked_at) VALUES ($1, $2, $3, $4, $5) `, @@ -826,7 +789,7 @@ func TestMigration26(t *testing.T) { helpers.FailOnError(t, err) // insert into cluster_rule_toggle - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO cluster_rule_toggle (cluster_id, rule_id, error_key, user_id, disabled, updated_at) VALUES ($1, $2, $3, $4, $5, $6)`, testdata.ClusterName, @@ -840,7 +803,7 @@ func TestMigration26(t *testing.T) { unknownClusterID := testdata.GetRandomClusterID() // insert into cluster_rule_toggle - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO cluster_rule_toggle (cluster_id, rule_id, error_key, user_id, disabled, updated_at) VALUES ($1, $2, $3, $4, $5, $6)`, unknownClusterID, @@ -853,11 +816,11 @@ func TestMigration26(t *testing.T) { helpers.FailOnError(t, err) // migrate to 26, popoulating org_id column based on report table - err = migration.SetDBVersion(db, dbDriver, 26) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 26, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var orgID types.OrgID - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT org_id FROM @@ -873,7 +836,7 @@ func TestMigration26(t *testing.T) { // org_id must match that in report table assert.Equal(t, orgID, testdata.OrgID) - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT org_id FROM @@ -900,19 +863,14 @@ func TestMigration27(t *testing.T) { "cluster_user_rule_disable_feedback", } - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == 
types.DBDriverSQLite3 { - // sqlite is no longer supported - return - } - - err := migration.SetDBVersion(db, dbDriver, 26) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 26, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) // insert into report table because of DB constraints - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO report (org_id, cluster, report, reported_at, last_checked_at) VALUES ($1, $2, $3, $4, $5) `, @@ -928,7 +886,7 @@ func TestMigration27(t *testing.T) { for i, orgID := range orgIDList { // insert into cluster_rule_toggle userID := fmt.Sprintf("%v", i) - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO cluster_rule_toggle (cluster_id, rule_id, error_key, user_id, disabled, updated_at, org_id) VALUES ($1, $2, $3, $4, $5, $6, $7)`, testdata.ClusterName, @@ -942,7 +900,7 @@ func TestMigration27(t *testing.T) { helpers.FailOnError(t, err) // insert into cluster_user_rule_disable_feedback - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO cluster_user_rule_disable_feedback (cluster_id, rule_id, error_key, user_id, message, added_at, updated_at, org_id) VALUES @@ -959,7 +917,7 @@ func TestMigration27(t *testing.T) { helpers.FailOnError(t, err) // insert into cluster_rule_user_feedback - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO cluster_rule_user_feedback (cluster_id, rule_id, error_key, user_id, message, added_at, updated_at, user_vote, org_id) VALUES @@ -981,21 +939,21 @@ func TestMigration27(t *testing.T) { for _, table := range tableList { var cnt int query := fmt.Sprintf(countQuery, table) - err = db.QueryRow(query).Scan(&cnt) + err = dbConn.QueryRow(query).Scan(&cnt) helpers.FailOnError(t, err) // expect 3 rows assert.Equal(t, cnt, 3) } // migrate to 27, deleting rows where org_id = 0 - err = migration.SetDBVersion(db, dbDriver, 27) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 27, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) // check correct number of rows 
remaining (2) for _, table := range tableList { var cnt int query := fmt.Sprintf(countQuery, table) - err = db.QueryRow(query).Scan(&cnt) + err = dbConn.QueryRow(query).Scan(&cnt) helpers.FailOnError(t, err) // expect 2 rows, one was supposed to be deleted in all tables assert.Equal(t, cnt, 2) @@ -1003,19 +961,14 @@ func TestMigration27(t *testing.T) { } func TestMigration28(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // sqlite is no longer supported - return - } - - err := migration.SetDBVersion(db, dbDriver, 27) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 27, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) // insert into rule_disable - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO rule_disable (org_id, user_id, rule_id, error_key, created_at) VALUES @@ -1029,7 +982,7 @@ func TestMigration28(t *testing.T) { helpers.FailOnError(t, err) // insert into rule_disable different user_id, same org_id - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO rule_disable (org_id, user_id, rule_id, error_key, created_at) VALUES @@ -1044,17 +997,17 @@ func TestMigration28(t *testing.T) { helpers.FailOnError(t, err) // delete from table - _, err = db.Exec( + _, err = dbConn.Exec( `DELETE FROM rule_disable`, ) helpers.FailOnError(t, err) // migrate to different constraint - err = migration.SetDBVersion(db, dbDriver, 28) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 28, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) // insert into rule_disable - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT INTO rule_disable (org_id, user_id, rule_id, error_key, created_at) VALUES @@ -1068,7 +1021,7 @@ func TestMigration28(t *testing.T) { helpers.FailOnError(t, err) // insert into rule_disable different user_id, same org_id - _, err = db.Exec( + _, err = dbConn.Exec( `INSERT 
INTO rule_disable (org_id, user_id, rule_id, error_key, created_at) VALUES @@ -1084,18 +1037,13 @@ func TestMigration28(t *testing.T) { } func TestMigration29(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // nothing worth testing for sqlite - return - } - - err := migration.SetDBVersion(db, dbDriver, 28) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 28, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO cluster_rule_toggle (cluster_id, user_id, org_id, rule_id, error_key, disabled, disabled_at, updated_at) VALUES @@ -1112,17 +1060,17 @@ func TestMigration29(t *testing.T) { ) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 29) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 29, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT user_id FROM cluster_rule_toggle`).Err() + err = dbConn.QueryRow(`SELECT user_id FROM cluster_rule_toggle`).Err() assert.Error(t, err, "user_id column should not exist") - err = migration.SetDBVersion(db, dbDriver, 28) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 28, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var userID types.UserID - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT user_id FROM @@ -1138,18 +1086,13 @@ func TestMigration29(t *testing.T) { } func TestMigration30(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // nothing worth testing for sqlite - return - } - - err := migration.SetDBVersion(db, dbDriver, 29) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 29, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = 
db.Exec(` + _, err = dbConn.Exec(` INSERT INTO rule_disable (user_id, org_id, rule_id, error_key, justification, created_at, updated_at) VALUES @@ -1165,17 +1108,17 @@ func TestMigration30(t *testing.T) { ) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 30) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 30, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT user_id FROM rule_disable`).Err() + err = dbConn.QueryRow(`SELECT user_id FROM rule_disable`).Err() assert.Error(t, err, "user_id column should not exist") - err = migration.SetDBVersion(db, dbDriver, 29) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 29, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var userID types.UserID - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT user_id FROM @@ -1191,18 +1134,13 @@ func TestMigration30(t *testing.T) { } func TestMigration31(t *testing.T) { - db, dbDriver, closer := prepareDBAndInfo(t) + dbConn, dbDriver, dbSchema, closer := ira_helpers.PrepareDBAndInfo(t) defer closer() - if dbDriver == types.DBDriverSQLite3 { - // nothing worth testing for sqlite - return - } - - err := migration.SetDBVersion(db, dbDriver, 30) + err := migration.SetDBVersion(dbConn, dbDriver, dbSchema, 30, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO advisor_ratings (user_id, org_id, rule_fqdn, error_key, rated_at, last_updated_at, rating, rule_id) VALUES @@ -1219,10 +1157,10 @@ func TestMigration31(t *testing.T) { ) helpers.FailOnError(t, err) - err = migration.SetDBVersion(db, dbDriver, 31) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 31, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) - _, err = db.Exec(` + _, err = dbConn.Exec(` INSERT INTO advisor_ratings (org_id, rule_fqdn, error_key, rated_at, last_updated_at, rating, rule_id) VALUES @@ -1238,14 +1176,14 @@ func TestMigration31(t 
*testing.T) { ) helpers.FailOnError(t, err) - err = db.QueryRow(`SELECT user_id FROM advisor_ratings`).Err() + err = dbConn.QueryRow(`SELECT user_id FROM advisor_ratings`).Err() assert.Error(t, err, "user_id column should not exist") - err = migration.SetDBVersion(db, dbDriver, 30) + err = migration.SetDBVersion(dbConn, dbDriver, dbSchema, 30, ocpmigrations.UsableOCPMigrations) helpers.FailOnError(t, err) var userID types.UserID - err = db.QueryRow(` + err = dbConn.QueryRow(` SELECT user_id FROM diff --git a/migration/ocpmigrations/export_test.go b/migration/ocpmigrations/export_test.go new file mode 100644 index 000000000..0aff97a8c --- /dev/null +++ b/migration/ocpmigrations/export_test.go @@ -0,0 +1,14 @@ +package ocpmigrations + +// Export for testing +// +// This source file contains name aliases of all package-private functions +// that need to be called from unit tests. Aliases should start with uppercase +// letter because unit tests belong to different package. +// +// Please look into the following blogpost: +// https://medium.com/@robiplus/golang-trick-export-for-test-aa16cbd7b8cd +// to see why this trick is needed. +var ( + Mig0004ModifyClusterRuleUserFeedback = mig0004ModifyClusterRuleUserFeedback +) diff --git a/migration/mig_0001_create_report.go b/migration/ocpmigrations/mig_0001_create_report.go similarity index 89% rename from migration/mig_0001_create_report.go rename to migration/ocpmigrations/mig_0001_create_report.go index 7184a3af7..8cc02b051 100644 --- a/migration/mig_0001_create_report.go +++ b/migration/ocpmigrations/mig_0001_create_report.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0001CreateReport = Migration{ +var mig0001CreateReport = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE report ( diff --git a/migration/mig_0002_create_rule_content.go b/migration/ocpmigrations/mig_0002_create_rule_content.go similarity index 92% rename from migration/mig_0002_create_rule_content.go rename to migration/ocpmigrations/mig_0002_create_rule_content.go index 784cd9769..285514c0d 100644 --- a/migration/mig_0002_create_rule_content.go +++ b/migration/ocpmigrations/mig_0002_create_rule_content.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0002CreateRuleContent = Migration{ +var mig0002CreateRuleContent = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE rule ( diff --git a/migration/mig_0003_create_cluster_rule_user_feedback.go b/migration/ocpmigrations/mig_0003_create_cluster_rule_user_feedback.go similarity index 88% rename from migration/mig_0003_create_cluster_rule_user_feedback.go rename to migration/ocpmigrations/mig_0003_create_cluster_rule_user_feedback.go index 4576a658e..c4e0ca095 100644 --- a/migration/mig_0003_create_cluster_rule_user_feedback.go +++ b/migration/ocpmigrations/mig_0003_create_cluster_rule_user_feedback.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0003CreateClusterRuleUserFeedback = Migration{ +var mig0003CreateClusterRuleUserFeedback = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE cluster_rule_user_feedback ( diff --git a/migration/mig_0004_modify_cluster_rule_user_feedback.go b/migration/ocpmigrations/mig_0004_modify_cluster_rule_user_feedback.go similarity index 80% rename from migration/mig_0004_modify_cluster_rule_user_feedback.go rename to migration/ocpmigrations/mig_0004_modify_cluster_rule_user_feedback.go index fcfe37e7b..1ff787ea4 100644 --- a/migration/mig_0004_modify_cluster_rule_user_feedback.go +++ b/migration/ocpmigrations/mig_0004_modify_cluster_rule_user_feedback.go @@ -14,13 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations /* migration4 adds foreign keys to cluster_rule_user_feedback */ -var mig0004ModifyClusterRuleUserFeedback = NewUpdateTableMigration( +import ( + "github.com/RedHatInsights/insights-results-aggregator/migration" +) + +var mig0004ModifyClusterRuleUserFeedback = migration.NewUpdateTableMigration( clusterRuleUserFeedbackTable, ` CREATE TABLE cluster_rule_user_feedback ( @@ -47,10 +51,12 @@ var mig0004ModifyClusterRuleUserFeedback = NewUpdateTableMigration( updated_at TIMESTAMP NOT NULL, PRIMARY KEY(cluster_id, rule_id, user_id), - FOREIGN KEY (cluster_id) + CONSTRAINT cluster_rule_user_feedback_cluster_id_fkey + FOREIGN KEY (cluster_id) REFERENCES report(cluster) ON DELETE CASCADE, - FOREIGN KEY (rule_id) + CONSTRAINT cluster_rule_user_feedback_rule_id_fkey + FOREIGN KEY (rule_id) REFERENCES rule(module) ON DELETE CASCADE ); diff --git a/migration/mig_0005_create_consumer_error.go b/migration/ocpmigrations/mig_0005_create_consumer_error.go similarity index 89% rename from migration/mig_0005_create_consumer_error.go rename to migration/ocpmigrations/mig_0005_create_consumer_error.go index bc24f1cd5..79058cee2 100644 --- a/migration/mig_0005_create_consumer_error.go +++ b/migration/ocpmigrations/mig_0005_create_consumer_error.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0005CreateConsumerError = Migration{ +var mig0005CreateConsumerError = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE consumer_error ( diff --git a/migration/mig_0006_add_on_delete_cascade.go b/migration/ocpmigrations/mig_0006_add_on_delete_cascade.go similarity index 71% rename from migration/mig_0006_add_on_delete_cascade.go rename to migration/ocpmigrations/mig_0006_add_on_delete_cascade.go index b94505ab8..d0b5aa3cb 100644 --- a/migration/mig_0006_add_on_delete_cascade.go +++ b/migration/ocpmigrations/mig_0006_add_on_delete_cascade.go @@ -14,14 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations -var mig0006AddOnDeleteCascade = NewUpdateTableMigration( +import ( + "github.com/RedHatInsights/insights-results-aggregator/migration" +) + +var mig0006AddOnDeleteCascade = migration.NewUpdateTableMigration( ruleErrorKeyTable, ` CREATE TABLE rule_error_key ( "error_key" VARCHAR NOT NULL, - "rule_module" VARCHAR NOT NULL REFERENCES rule(module), + "rule_module" VARCHAR NOT NULL, "condition" VARCHAR NOT NULL, "description" VARCHAR NOT NULL, "impact" INTEGER NOT NULL, @@ -29,7 +33,9 @@ var mig0006AddOnDeleteCascade = NewUpdateTableMigration( "publish_date" TIMESTAMP NOT NULL, "active" BOOLEAN NOT NULL, "generic" VARCHAR NOT NULL, - PRIMARY KEY("error_key", "rule_module") + PRIMARY KEY("error_key", "rule_module"), + CONSTRAINT rule_error_key_rule_module_fkey + FOREIGN KEY ("rule_module") REFERENCES rule("module") ) `, nil, @@ -44,7 +50,9 @@ var mig0006AddOnDeleteCascade = NewUpdateTableMigration( "publish_date" TIMESTAMP NOT NULL, "active" BOOLEAN NOT NULL, "generic" VARCHAR NOT NULL, - PRIMARY KEY("error_key", 
"rule_module") + PRIMARY KEY("error_key", "rule_module"), + CONSTRAINT rule_error_key_rule_module_fkey + FOREIGN KEY ("rule_module") REFERENCES rule("module") ON DELETE CASCADE ) `, ) diff --git a/migration/mig_0007_create_cluster_rule_toggle.go b/migration/ocpmigrations/mig_0007_create_cluster_rule_toggle.go similarity index 84% rename from migration/mig_0007_create_cluster_rule_toggle.go rename to migration/ocpmigrations/mig_0007_create_cluster_rule_toggle.go index ca324b933..1f87306c9 100644 --- a/migration/mig_0007_create_cluster_rule_toggle.go +++ b/migration/ocpmigrations/mig_0007_create_cluster_rule_toggle.go @@ -11,15 +11,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0007CreateClusterRuleToggle = Migration{ +var mig0007CreateClusterRuleToggle = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE cluster_rule_toggle ( @@ -31,7 +32,7 @@ var mig0007CreateClusterRuleToggle = Migration{ enabled_at TIMESTAMP NULL, updated_at TIMESTAMP NOT NULL, - CHECK (disabled >= 0 AND disabled <= 1), + disabled_check SMALLINT CHECK (disabled >= 0 AND disabled <= 1), PRIMARY KEY(cluster_id, rule_id, user_id) )`) return err diff --git a/migration/mig_0008_add_offset_field_to_report_table.go b/migration/ocpmigrations/mig_0008_add_offset_field_to_report_table.go similarity index 66% rename from migration/mig_0008_add_offset_field_to_report_table.go rename to migration/ocpmigrations/mig_0008_add_offset_field_to_report_table.go index faf124d19..7fea31a65 100644 --- a/migration/mig_0008_add_offset_field_to_report_table.go +++ b/migration/ocpmigrations/mig_0008_add_offset_field_to_report_table.go @@ -11,15 +11,16 @@ See the License for the specific language governing 
permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0008AddOffsetFieldToReportTable = Migration{ +var mig0008AddOffsetFieldToReportTable = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` ALTER TABLE report ADD COLUMN kafka_offset BIGINT NOT NULL DEFAULT 0 @@ -27,19 +28,6 @@ var mig0008AddOffsetFieldToReportTable = Migration{ return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverSQLite3 { - return downgradeTable(tx, clusterReportTable, ` - CREATE TABLE report ( - org_id INTEGER NOT NULL, - cluster VARCHAR NOT NULL UNIQUE, - report VARCHAR NOT NULL, - reported_at TIMESTAMP, - last_checked_at TIMESTAMP, - PRIMARY KEY(org_id, cluster) - ) - `, []string{"org_id", "cluster", "report", "reported_at", "last_checked_at"}) - } - _, err := tx.Exec(` ALTER TABLE report DROP COLUMN kafka_offset `) diff --git a/migration/mig_0009_add_index_on_report_kafka_offset.go b/migration/ocpmigrations/mig_0009_add_index_on_report_kafka_offset.go similarity index 86% rename from migration/mig_0009_add_index_on_report_kafka_offset.go rename to migration/ocpmigrations/mig_0009_add_index_on_report_kafka_offset.go index 408769d65..d324be594 100644 --- a/migration/mig_0009_add_index_on_report_kafka_offset.go +++ b/migration/ocpmigrations/mig_0009_add_index_on_report_kafka_offset.go @@ -11,15 +11,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0009AddIndexOnReportKafkaOffset = Migration{ +var mig0009AddIndexOnReportKafkaOffset = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE INDEX report_kafka_offset_btree_idx ON report (kafka_offset) diff --git a/migration/mig_0010_add_tags_on_rule_content.go b/migration/ocpmigrations/mig_0010_add_tags_on_rule_content.go similarity index 55% rename from migration/mig_0010_add_tags_on_rule_content.go rename to migration/ocpmigrations/mig_0010_add_tags_on_rule_content.go index 8a0900a68..4da266628 100644 --- a/migration/mig_0010_add_tags_on_rule_content.go +++ b/migration/ocpmigrations/mig_0010_add_tags_on_rule_content.go @@ -11,15 +11,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0010AddTagsFieldToRuleErrorKeyTable = Migration{ +var mig0010AddTagsFieldToRuleErrorKeyTable = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` ALTER TABLE rule_error_key ADD COLUMN tags VARCHAR NOT NULL DEFAULT '' @@ -27,23 +28,6 @@ var mig0010AddTagsFieldToRuleErrorKeyTable = Migration{ return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverSQLite3 { - return downgradeTable(tx, ruleErrorKeyTable, ` - CREATE TABLE rule_error_key ( - "error_key" VARCHAR NOT NULL, - "rule_module" VARCHAR NOT NULL REFERENCES rule(module) ON DELETE CASCADE, - "condition" VARCHAR NOT NULL, - "description" VARCHAR NOT NULL, - "impact" INTEGER NOT NULL, - "likelihood" INTEGER NOT NULL, - "publish_date" TIMESTAMP NOT NULL, - "active" BOOLEAN NOT NULL, - "generic" VARCHAR NOT NULL, - PRIMARY KEY("error_key", "rule_module") - ) - `, []string{"error_key", "rule_module", "condition", "description", "impact", "likelihood", "publish_date", "active", "generic"}) - } - _, err := tx.Exec(` ALTER TABLE rule_error_key DROP COLUMN tags `) diff --git a/migration/mig_0011_remove_fk_and_content_tables.go b/migration/ocpmigrations/mig_0011_remove_fk_and_content_tables.go similarity index 78% rename from migration/mig_0011_remove_fk_and_content_tables.go rename to migration/ocpmigrations/mig_0011_remove_fk_and_content_tables.go index 5f82adf4f..800273f9f 100644 --- a/migration/mig_0011_remove_fk_and_content_tables.go +++ b/migration/ocpmigrations/mig_0011_remove_fk_and_content_tables.go @@ -11,7 +11,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations /* migration11 removes foreign keys to rules and rule error keys, and then removes the @@ -21,10 +21,11 @@ package migration import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var migrationClusterRuleUserFeedback = Migration{ +var migrationClusterRuleUserFeedback = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { var err error if driver == types.DBDriverPostgres { @@ -34,7 +35,7 @@ var migrationClusterRuleUserFeedback = Migration{ `) } else { - err = upgradeTable( + err = migration.UpgradeTable( tx, clusterRuleUserFeedbackTable, ` @@ -48,6 +49,7 @@ var migrationClusterRuleUserFeedback = Migration{ updated_at TIMESTAMP NOT NULL, PRIMARY KEY(cluster_id, rule_id, user_id), + CONSTRAINT cluster_rule_user_feedback_cluster_id_fkey FOREIGN KEY (cluster_id) REFERENCES report(cluster) ON DELETE CASCADE ) `) @@ -55,36 +57,15 @@ var migrationClusterRuleUserFeedback = Migration{ return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverPostgres { - _, err := tx.Exec(` + _, err := tx.Exec(` ALTER TABLE cluster_rule_user_feedback - ADD FOREIGN KEY(rule_id) REFERENCES rule(module) ON DELETE CASCADE + ADD CONSTRAINT cluster_rule_user_feedback_rule_id_fkey FOREIGN KEY(rule_id) REFERENCES rule(module) ON DELETE CASCADE `) - return err - } - - return downgradeTable( - tx, - clusterRuleUserFeedbackTable, - ` - CREATE TABLE cluster_rule_user_feedback ( - cluster_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - user_id VARCHAR NOT NULL, - message VARCHAR NOT NULL, - user_vote SMALLINT NOT NULL, - added_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - PRIMARY KEY(cluster_id, rule_id, user_id), - FOREIGN KEY (cluster_id) REFERENCES report(cluster) ON DELETE CASCADE - FOREIGN KEY (rule_id) REFERENCES rule(module) ON DELETE CASCADE - )`, - nil) + return err }, 
} -var migrationContentTables = Migration{ +var migrationContentTables = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { if _, err := tx.Exec("DROP TABLE rule_error_key"); err != nil { return err @@ -126,7 +107,7 @@ var migrationContentTables = Migration{ }, } -var mig0011RemoveFKAndContentTables = Migration{ +var mig0011RemoveFKAndContentTables = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if err := migrationClusterRuleUserFeedback.StepUp(tx, driver); err != nil { return err diff --git a/migration/mig_0012_create_cluster_user_rule_disable_feedback.go b/migration/ocpmigrations/mig_0012_create_cluster_user_rule_disable_feedback.go similarity index 88% rename from migration/mig_0012_create_cluster_user_rule_disable_feedback.go rename to migration/ocpmigrations/mig_0012_create_cluster_user_rule_disable_feedback.go index bbc98b7fa..cd440013a 100644 --- a/migration/mig_0012_create_cluster_user_rule_disable_feedback.go +++ b/migration/ocpmigrations/mig_0012_create_cluster_user_rule_disable_feedback.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0012CreateClusterUserRuleDisableFeedback = Migration{ +var mig0012CreateClusterUserRuleDisableFeedback = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE cluster_user_rule_disable_feedback ( diff --git a/migration/mig_0013_add_rule_hit_table.go b/migration/ocpmigrations/mig_0013_add_rule_hit_table.go similarity index 93% rename from migration/mig_0013_add_rule_hit_table.go rename to migration/ocpmigrations/mig_0013_add_rule_hit_table.go index e7d2edd24..92d1ba253 100644 --- a/migration/mig_0013_add_rule_hit_table.go +++ b/migration/ocpmigrations/mig_0013_add_rule_hit_table.go @@ -11,17 +11,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" "encoding/json" "time" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0013AddRuleHitTable = Migration{ +var mig0013AddRuleHitTable = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE rule_hit ( @@ -36,11 +37,6 @@ var mig0013AddRuleHitTable = Migration{ return err } - if driver != types.DBDriverPostgres { - // if sqlite, just ignore the actual migration cuz sqlite is too stupid for that - return nil - } - _, err = tx.Exec(` DECLARE report_cursor CURSOR FOR SELECT diff --git a/migration/mig_0014_modify_cluster_rule_toggle.go b/migration/ocpmigrations/mig_0014_modify_cluster_rule_toggle.go similarity index 88% rename from migration/mig_0014_modify_cluster_rule_toggle.go rename to migration/ocpmigrations/mig_0014_modify_cluster_rule_toggle.go index fb01f4a1b..7f694cc4f 100644 --- 
a/migration/mig_0014_modify_cluster_rule_toggle.go +++ b/migration/ocpmigrations/mig_0014_modify_cluster_rule_toggle.go @@ -14,11 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -28,7 +29,7 @@ import ( which can't be used in a primary key... */ -var mig0014ModifyClusterRuleToggleAlter = Migration{ +var mig0014ModifyClusterRuleToggleAlter = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` ALTER TABLE cluster_rule_toggle DROP CONSTRAINT cluster_rule_toggle_pkey, @@ -47,7 +48,7 @@ var mig0014ModifyClusterRuleToggleAlter = Migration{ }, } -var mig0014ModifyClusterRuleToggleGeneral = NewUpdateTableMigration( +var mig0014ModifyClusterRuleToggleGeneral = migration.NewUpdateTableMigration( clusterRuleToggleTable, ` CREATE TABLE cluster_rule_toggle ( @@ -74,13 +75,13 @@ var mig0014ModifyClusterRuleToggleGeneral = NewUpdateTableMigration( enabled_at TIMESTAMP NULL, updated_at TIMESTAMP NOT NULL, - CHECK (disabled >= 0 AND disabled <= 1), + disabled_check SMALLINT CHECK (disabled >= 0 AND disabled <= 1), PRIMARY KEY(cluster_id, rule_id) ) `, ) -var mig0014ModifyClusterRuleToggle = Migration{ +var mig0014ModifyClusterRuleToggle = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if driver == types.DBDriverPostgres { return mig0014ModifyClusterRuleToggleAlter.StepUp(tx, driver) diff --git a/migration/mig_0015_modify_cluster_rule_toggle.go b/migration/ocpmigrations/mig_0015_modify_cluster_rule_toggle.go similarity index 66% rename from migration/mig_0015_modify_cluster_rule_toggle.go rename to migration/ocpmigrations/mig_0015_modify_cluster_rule_toggle.go index c6448a10f..fa5b2f109 100644 --- a/migration/mig_0015_modify_cluster_rule_toggle.go +++ 
b/migration/ocpmigrations/mig_0015_modify_cluster_rule_toggle.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" "github.com/rs/zerolog/log" ) @@ -31,7 +32,7 @@ import ( */ // mig0015ClusterRuleToggle is a helper for update the cluster_rule_toggle table -var mig0015ClusterRuleToggle = Migration{ +var mig0015ClusterRuleToggle = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { var err error @@ -41,69 +42,25 @@ var mig0015ClusterRuleToggle = Migration{ return err } - if driver == types.DBDriverPostgres { - _, err = tx.Exec(` + _, err = tx.Exec(` ALTER TABLE cluster_rule_toggle DROP CONSTRAINT cluster_rule_toggle_pkey, ADD CONSTRAINT cluster_rule_toggle_pkey PRIMARY KEY (cluster_id, rule_id, error_key); `) - } else { - err = mig0015ClusterRuleTogglePrimaryKeysSQLite.StepUp(tx, driver) - } - return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverPostgres { - _, err := tx.Exec(` + _, err := tx.Exec(` ALTER TABLE cluster_rule_toggle DROP CONSTRAINT cluster_rule_toggle_pkey, ADD CONSTRAINT cluster_rule_toggle_pkey PRIMARY KEY (cluster_id, rule_id); ALTER TABLE cluster_rule_toggle DROP COLUMN error_key; `) - return err - } - - return mig0015ClusterRuleTogglePrimaryKeysSQLite.StepDown(tx, driver) + return err }, } -// mig0015ClusterRuleTogglePrimaryKeysSQLite is a helper to update PKs on cluster_rule_toggle table in SQLite -var mig0015ClusterRuleTogglePrimaryKeysSQLite = NewUpdateTableMigration( - clusterRuleToggleTable, - ` - CREATE TABLE cluster_rule_toggle ( - cluster_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - user_id VARCHAR NULL, - disabled SMALLINT NOT NULL, - disabled_at TIMESTAMP NULL, - enabled_at TIMESTAMP NULL, - 
updated_at TIMESTAMP NOT NULL, - - CHECK (disabled >= 0 AND disabled <= 1), - PRIMARY KEY(cluster_id, rule_id) - ) - `, - []string{"cluster_id", "rule_id", "user_id", "disabled", "disabled_at", "enabled_at", "updated_at"}, - ` - CREATE TABLE cluster_rule_toggle ( - cluster_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - user_id VARCHAR NULL, - disabled SMALLINT NOT NULL, - disabled_at TIMESTAMP NULL, - enabled_at TIMESTAMP NULL, - updated_at TIMESTAMP NOT NULL, - error_key VARCHAR NOT NULL, - - CHECK (disabled >= 0 AND disabled <= 1), - PRIMARY KEY(cluster_id, rule_id, error_key) - ) - `, -) - // mig0015ClusterRuleUserFeedback is a helper for update the cluster_rule_user_feedback table -var mig0015ClusterRuleUserFeedback = Migration{ +var mig0015ClusterRuleUserFeedback = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { var err error if _, err = tx.Exec(` @@ -112,69 +69,26 @@ var mig0015ClusterRuleUserFeedback = Migration{ return err } - if driver == types.DBDriverPostgres { - _, err = tx.Exec(` + _, err = tx.Exec(` ALTER TABLE cluster_rule_user_feedback DROP CONSTRAINT cluster_rule_user_feedback_pkey1, ADD CONSTRAINT cluster_rule_user_feedback_pkey PRIMARY KEY (cluster_id, rule_id, user_id, error_key); `) - } else { - err = mig0015ClusterRuleUserFeedbackPrimaryKeysSQLite.StepUp(tx, driver) - } - return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverPostgres { - _, err := tx.Exec(` + _, err := tx.Exec(` ALTER TABLE cluster_rule_user_feedback DROP CONSTRAINT cluster_rule_user_feedback_pkey, ADD CONSTRAINT cluster_rule_user_feedback_pkey1 PRIMARY KEY (cluster_id, rule_id, user_id); ALTER TABLE cluster_rule_user_feedback DROP COLUMN error_key; `) - return err - } + return err - return mig0015ClusterRuleUserFeedbackPrimaryKeysSQLite.StepDown(tx, driver) }, } -// mig0015ClusterRuleUserFeedbackPrimaryKeysSQLite is a helper to update PKs on cluster_rule_user_feedback table in SQLite -var 
mig0015ClusterRuleUserFeedbackPrimaryKeysSQLite = NewUpdateTableMigration( - clusterRuleUserFeedbackTable, - ` - CREATE TABLE cluster_rule_user_feedback ( - cluster_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - user_id VARCHAR NOT NULL, - message VARCHAR NOT NULL, - user_vote SMALLINT NOT NULL, - added_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - PRIMARY KEY(cluster_id, rule_id, user_id), - FOREIGN KEY (cluster_id) REFERENCES report(cluster) ON DELETE CASCADE - ) - `, - []string{"cluster_id", "rule_id", "user_id", "message", "user_vote", "added_at", "updated_at"}, - ` - CREATE TABLE cluster_rule_user_feedback ( - cluster_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - user_id VARCHAR NOT NULL, - message VARCHAR NOT NULL, - user_vote SMALLINT NOT NULL, - added_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - error_key VARCHAR NOT NULL, - - PRIMARY KEY(cluster_id, rule_id, user_id, error_key), - FOREIGN KEY (cluster_id) REFERENCES report(cluster) ON DELETE CASCADE - ) - `, -) - // mig0015ClusterUserRuleDisableFeedback is a helper for update the cluster_user_rule_disable_feedback -var mig0015ClusterUserRuleDisableFeedback = Migration{ +var mig0015ClusterUserRuleDisableFeedback = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { var err error @@ -184,63 +98,23 @@ var mig0015ClusterUserRuleDisableFeedback = Migration{ return err } - if driver == types.DBDriverPostgres { - _, err = tx.Exec(` + _, err = tx.Exec(` ALTER TABLE cluster_user_rule_disable_feedback DROP CONSTRAINT cluster_user_rule_disable_feedback_pkey, ADD CONSTRAINT cluster_user_rule_disable_feedback_pkey PRIMARY KEY (cluster_id, user_id, rule_id, error_key); `) - } else { - err = mig0015ClusterUserRuleDisableFeedbackPrimaryKeysSQLite.StepUp(tx, driver) - } - return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverPostgres { - _, err := tx.Exec(` + _, err := tx.Exec(` ALTER TABLE 
cluster_user_rule_disable_feedback DROP CONSTRAINT cluster_user_rule_disable_feedback_pkey, ADD CONSTRAINT cluster_user_rule_disable_feedback_pkey PRIMARY KEY (cluster_id, user_id, rule_id); ALTER TABLE cluster_user_rule_disable_feedback DROP COLUMN error_key; `) - return err - } - - return mig0015ClusterUserRuleDisableFeedbackPrimaryKeysSQLite.StepDown(tx, driver) + return err }, } -// mig0015ClusterUserRuleDisableFeedbackPrimaryKeysSQLite is a helper to update PKs on cluster_user_rule_disable_feedback table in SQLite -var mig0015ClusterUserRuleDisableFeedbackPrimaryKeysSQLite = NewUpdateTableMigration( - clusterUserRuleDisableFeedbackTable, - ` - CREATE TABLE cluster_user_rule_disable_feedback ( - cluster_id VARCHAR NOT NULL, - user_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - message VARCHAR NOT NULL, - added_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - PRIMARY KEY(cluster_id, user_id, rule_id) - ) - `, - []string{"cluster_id", "user_id", "rule_id", "message", "added_at", "updated_at"}, - ` - CREATE TABLE cluster_user_rule_disable_feedback ( - cluster_id VARCHAR NOT NULL, - user_id VARCHAR NOT NULL, - rule_id VARCHAR NOT NULL, - message VARCHAR NOT NULL, - added_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - error_key VARCHAR NOT NULL, - - PRIMARY KEY(cluster_id, user_id, rule_id, error_key) - ) - `, -) - // migrateClusterRoleToggleData is a helper to update the current data with default values // It takes the only possible value for error_key on the rules that only has one possible error key func migrateClusterRoleToggleData(tx *sql.Tx) error { @@ -260,18 +134,18 @@ func migrateClusterRoleToggleData(tx *sql.Tx) error { log.Info().Str("rule_id", ruleIDWildcard).Str("errorKey", errorKey).Msg("Updating DB data") - err := updateTableData(tx, "cluster_rule_toggle", updateClusterRuleToggleQuery, errorKey, ruleIDWildcard) + err := migration.UpdateTableData(tx, "cluster_rule_toggle", updateClusterRuleToggleQuery, errorKey, 
ruleIDWildcard) if err != nil { return err } - err = updateTableData(tx, "cluster_rule_user_feedback", updateClusterRuleUserFeedbackQuery, errorKey, ruleIDWildcard) + err = migration.UpdateTableData(tx, "cluster_rule_user_feedback", updateClusterRuleUserFeedbackQuery, errorKey, ruleIDWildcard) if err != nil { return err } - err = updateTableData(tx, "cluster_user_rule_disable_feedback", updateClusterUserRuleDisableFeedbackQuery, errorKey, ruleIDWildcard) + err = migration.UpdateTableData(tx, "cluster_user_rule_disable_feedback", updateClusterUserRuleDisableFeedbackQuery, errorKey, ruleIDWildcard) if err != nil { return err } @@ -281,7 +155,7 @@ func migrateClusterRoleToggleData(tx *sql.Tx) error { } // mig0015ModifyClusterRuleTables migrates the tables related to user toggle and feedback with error_key -var mig0015ModifyFeedbackTables = Migration{ +var mig0015ModifyFeedbackTables = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if err := mig0015ClusterRuleToggle.StepUp(tx, driver); err != nil { return err diff --git a/migration/mig_0016_add_recommendations_table.go b/migration/ocpmigrations/mig_0016_add_recommendations_table.go similarity index 92% rename from migration/mig_0016_add_recommendations_table.go rename to migration/ocpmigrations/mig_0016_add_recommendations_table.go index 0c5dc402c..9362e8359 100644 --- a/migration/mig_0016_add_recommendations_table.go +++ b/migration/ocpmigrations/mig_0016_add_recommendations_table.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0016AddRecommendationsTable = Migration{ +var mig0016AddRecommendationsTable = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { // Create recommendation table using currently stored rule hits if driver != types.DBDriverPostgres { diff --git a/migration/mig_0017_add_system_wide_rule_disable_table.go b/migration/ocpmigrations/mig_0017_add_system_wide_rule_disable_table.go similarity index 90% rename from migration/mig_0017_add_system_wide_rule_disable_table.go rename to migration/ocpmigrations/mig_0017_add_system_wide_rule_disable_table.go index 6aeeef0d1..c48c43556 100644 --- a/migration/mig_0017_add_system_wide_rule_disable_table.go +++ b/migration/ocpmigrations/mig_0017_add_system_wide_rule_disable_table.go @@ -11,15 +11,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0017AddSystemWideRuleDisableTable = Migration{ +var mig0017AddSystemWideRuleDisableTable = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { // Create rule_disable table _, err := tx.Exec(` diff --git a/migration/mig_0018_add_ratings_table.go b/migration/ocpmigrations/mig_0018_add_ratings_table.go similarity index 89% rename from migration/mig_0018_add_ratings_table.go rename to migration/ocpmigrations/mig_0018_add_ratings_table.go index bce22d6b8..053344253 100644 --- a/migration/mig_0018_add_ratings_table.go +++ b/migration/ocpmigrations/mig_0018_add_ratings_table.go @@ -11,15 +11,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0018AddRatingsTable = Migration{ +var mig0018AddRatingsTable = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { // Create advisor_ratings _, err := tx.Exec(` diff --git a/migration/mig_0019_modify_recommendation_table.go b/migration/ocpmigrations/mig_0019_modify_recommendation_table.go similarity index 70% rename from migration/mig_0019_modify_recommendation_table.go rename to migration/ocpmigrations/mig_0019_modify_recommendation_table.go index 0a634b384..ed2582095 100644 --- a/migration/mig_0019_modify_recommendation_table.go +++ b/migration/ocpmigrations/mig_0019_modify_recommendation_table.go @@ -14,33 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0019ModifyRecommendationTable = Migration{ +var mig0019ModifyRecommendationTable = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { - if driver != types.DBDriverPostgres { - // Add rule_id column - _, err := tx.Exec(` - ALTER TABLE recommendation ADD COLUMN rule_id VARCHAR NOT NULL DEFAULT '.'; - UPDATE recommendation SET rule_id = rule_fqdn + '|' + error_key; - `) - if err != nil { - return err - } - - // Add the created_at column - _, err = tx.Exec(` - ALTER TABLE recommendation ADD COLUMN created_at TIMESTAMP WITHOUT TIME ZONE; - `) - return err - } - // Fix rule_fqdn value for records created in migration 16 // The regex expression has two parts separated by a logical or `|`: // - (\.(?!.*\|)(?!.*\.|\|).*) finds the last dot and all the characters that follow it, @@ -78,23 +62,12 @@ var mig0019ModifyRecommendationTable = Migration{ return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { - if driver == types.DBDriverPostgres { - // Remove the created_at column - _, err := tx.Exec(` + // Remove the created_at column + _, err := tx.Exec(` ALTER TABLE recommendation DROP COLUMN IF EXISTS created_at; ALTER TABLE recommendation DROP COLUMN IF EXISTS rule_id; `) - return err - } else if driver == types.DBDriverSQLite3 { - // Why would SQLite allow you to drop a column... 
- _, err := tx.Exec(` - CREATE TABLE recommendation_temp AS SELECT org_id, cluster_id, rule_fqdn, error_key FROM recommendation; - DROP TABLE recommendation; - ALTER TABLE recommendation_temp RENAME TO recommendation; - `) - return err - } - return nil + return err }, } diff --git a/migration/mig_0020_add_rule_id_to_advisor_ratings.go b/migration/ocpmigrations/mig_0020_add_rule_id_to_advisor_ratings.go similarity index 59% rename from migration/mig_0020_add_rule_id_to_advisor_ratings.go rename to migration/ocpmigrations/mig_0020_add_rule_id_to_advisor_ratings.go index ec60633de..010cc544a 100644 --- a/migration/mig_0020_add_rule_id_to_advisor_ratings.go +++ b/migration/ocpmigrations/mig_0020_add_rule_id_to_advisor_ratings.go @@ -11,15 +11,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0020ModifyAdvisorRatingsTable = Migration{ +var mig0020ModifyAdvisorRatingsTable = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { // common code for renaming column and adding a new one _, err := tx.Exec(` @@ -31,48 +32,23 @@ var mig0020ModifyAdvisorRatingsTable = Migration{ return err } - if driver == types.DBDriverSQLite3 { - // Update new column with values from older ones - // Sqlite uses || to concatenate strings ???? 
- _, err = tx.Exec(` - UPDATE advisor_ratings SET rule_id = rule_fqdn || '|' || error_key; - `) - - return err - } else if driver == types.DBDriverPostgres { - // only postgres - // Rename rule_id to rule_fqdn - _, err = tx.Exec(` + // Rename rule_id to rule_fqdn + _, err = tx.Exec(` UPDATE advisor_ratings SET rule_id = CONCAT(rule_fqdn, '|', error_key); `) - return err - } - - return nil + return err }, StepDown: func(tx *sql.Tx, driver types.DBDriver) error { // Remove the rule_id column - if driver == types.DBDriverPostgres { - _, err := tx.Exec(` + _, err := tx.Exec(` ALTER TABLE advisor_ratings DROP COLUMN IF EXISTS rule_id; `) - if err != nil { - return err - } - } else if driver == types.DBDriverSQLite3 { - // sqlite, why so serious? - _, err := tx.Exec(` - CREATE TABLE advisor_ratings_temp AS SELECT user_id, org_id, rule_fqdn, error_key, rated_at, last_updated_at, rating FROM advisor_ratings; - DROP TABLE advisor_ratings; - ALTER TABLE advisor_ratings_temp RENAME TO advisor_ratings; - `) - if err != nil { - return err - } + if err != nil { + return err } // Rename rule_fqdn back to rule_id - _, err := tx.Exec(` + _, err = tx.Exec(` ALTER TABLE advisor_ratings RENAME COLUMN rule_fqdn TO rule_id; `) return err diff --git a/migration/mig_0021_add_gathered_at_to_report.go b/migration/ocpmigrations/mig_0021_add_gathered_at_to_report.go similarity index 84% rename from migration/mig_0021_add_gathered_at_to_report.go rename to migration/ocpmigrations/mig_0021_add_gathered_at_to_report.go index 47e008571..9580d6f56 100644 --- a/migration/mig_0021_add_gathered_at_to_report.go +++ b/migration/ocpmigrations/mig_0021_add_gathered_at_to_report.go @@ -12,16 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0021AddGatheredAtToReport = Migration{ +var mig0021AddGatheredAtToReport = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(`ALTER TABLE report ADD COLUMN gathered_at TIMESTAMP`) return err @@ -35,6 +36,6 @@ var mig0021AddGatheredAtToReport = Migration{ return err } - return fmt.Errorf(driverUnsupportedErr, driver) + return fmt.Errorf(migration.DriverUnsupportedErr, driver) }, } diff --git a/migration/mig_0022_cleanup_enable_disable_tables.go b/migration/ocpmigrations/mig_0022_cleanup_enable_disable_tables.go similarity index 89% rename from migration/mig_0022_cleanup_enable_disable_tables.go rename to migration/ocpmigrations/mig_0022_cleanup_enable_disable_tables.go index aa367a466..70e182163 100644 --- a/migration/mig_0022_cleanup_enable_disable_tables.go +++ b/migration/ocpmigrations/mig_0022_cleanup_enable_disable_tables.go @@ -12,15 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0022CleanupEnableDisableTables = Migration{ +var mig0022CleanupEnableDisableTables = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { // delete from cluster_rule_toggle rows, where rule_id doesn't end in .report _, err := tx.Exec(` diff --git a/migration/mig_0023_add_report_info_table.go b/migration/ocpmigrations/mig_0023_add_report_info_table.go similarity index 88% rename from migration/mig_0023_add_report_info_table.go rename to migration/ocpmigrations/mig_0023_add_report_info_table.go index 21d5ad4be..733989488 100644 --- a/migration/mig_0023_add_report_info_table.go +++ b/migration/ocpmigrations/mig_0023_add_report_info_table.go @@ -12,15 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package migration +package ocpmigrations import ( "database/sql" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0023AddReportInfoTable = Migration{ +var mig0023AddReportInfoTable = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(` CREATE TABLE report_info ( diff --git a/migration/mig_0024_add_timestamp_to_rule_hit.go b/migration/ocpmigrations/mig_0024_add_timestamp_to_rule_hit.go similarity index 84% rename from migration/mig_0024_add_timestamp_to_rule_hit.go rename to migration/ocpmigrations/mig_0024_add_timestamp_to_rule_hit.go index 70ae9ed35..1e2fc76bd 100644 --- a/migration/mig_0024_add_timestamp_to_rule_hit.go +++ b/migration/ocpmigrations/mig_0024_add_timestamp_to_rule_hit.go @@ -12,16 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0024AddTimestampToRuleHit = Migration{ +var mig0024AddTimestampToRuleHit = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(`ALTER TABLE rule_hit ADD COLUMN created_at TIMESTAMP`) return err @@ -35,6 +36,6 @@ var mig0024AddTimestampToRuleHit = Migration{ return err } - return fmt.Errorf(driverUnsupportedErr, driver) + return fmt.Errorf(migration.DriverUnsupportedErr, driver) }, } diff --git a/migration/mig_0025_add_impacted_to_recommendation.go b/migration/ocpmigrations/mig_0025_add_impacted_to_recommendation.go similarity index 84% rename from migration/mig_0025_add_impacted_to_recommendation.go rename to migration/ocpmigrations/mig_0025_add_impacted_to_recommendation.go index 43a21570b..c02c47ad2 100644 --- a/migration/mig_0025_add_impacted_to_recommendation.go +++ b/migration/ocpmigrations/mig_0025_add_impacted_to_recommendation.go @@ -12,16 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0025AddImpactedToRecommendation = Migration{ +var mig0025AddImpactedToRecommendation = migration.Migration{ StepUp: func(tx *sql.Tx, _ types.DBDriver) error { _, err := tx.Exec(`ALTER TABLE recommendation ADD COLUMN impacted_since TIMESTAMP WITHOUT TIME ZONE`) return err @@ -35,6 +36,6 @@ var mig0025AddImpactedToRecommendation = Migration{ return err } - return fmt.Errorf(driverUnsupportedErr, driver) + return fmt.Errorf(migration.DriverUnsupportedErr, driver) }, } diff --git a/migration/mig_0026_add_and_populate_org_id_columns.go b/migration/ocpmigrations/mig_0026_add_and_populate_org_id_columns.go similarity index 93% rename from migration/mig_0026_add_and_populate_org_id_columns.go rename to migration/ocpmigrations/mig_0026_add_and_populate_org_id_columns.go index ae913b136..3bb4806e6 100644 --- a/migration/mig_0026_add_and_populate_org_id_columns.go +++ b/migration/ocpmigrations/mig_0026_add_and_populate_org_id_columns.go @@ -18,12 +18,13 @@ // information about cluster_id + org_id, so we don't have to use the org_id // populator from c.r.c. team. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -39,7 +40,7 @@ var tablesToUpdate = []string{ "cluster_user_rule_disable_feedback", } -var mig0026AddAndPopulateOrgIDColumns = Migration{ +var mig0026AddAndPopulateOrgIDColumns = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { for _, table := range tablesToUpdate { diff --git a/migration/mig_0027_cleanup_invalid_rows_missing_org_id.go b/migration/ocpmigrations/mig_0027_cleanup_invalid_rows_missing_org_id.go similarity index 93% rename from migration/mig_0027_cleanup_invalid_rows_missing_org_id.go rename to migration/ocpmigrations/mig_0027_cleanup_invalid_rows_missing_org_id.go index 7f62fb59b..2db8c4974 100644 --- a/migration/mig_0027_cleanup_invalid_rows_missing_org_id.go +++ b/migration/ocpmigrations/mig_0027_cleanup_invalid_rows_missing_org_id.go @@ -19,12 +19,13 @@ // report table (could happen, we sometimes don't check if a cluster exists), therefore they're // invalid rows. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" "github.com/rs/zerolog/log" ) @@ -39,7 +40,7 @@ var tablesToDeleteFrom = []string{ "cluster_user_rule_disable_feedback", } -var mig0027CleanupInvalidRowsMissingOrgID = Migration{ +var mig0027CleanupInvalidRowsMissingOrgID = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if driver == types.DBDriverPostgres { diff --git a/migration/mig_0028_alter_primary_key_rule_disable_index.go b/migration/ocpmigrations/mig_0028_alter_primary_key_rule_disable_index.go similarity index 92% rename from migration/mig_0028_alter_primary_key_rule_disable_index.go rename to migration/ocpmigrations/mig_0028_alter_primary_key_rule_disable_index.go index 491d72094..8e7701077 100644 --- a/migration/mig_0028_alter_primary_key_rule_disable_index.go +++ b/migration/ocpmigrations/mig_0028_alter_primary_key_rule_disable_index.go @@ -16,12 +16,13 @@ // user_id and we want to keep old records for informational purposes. Creates a non-unique // index instead to retain the same performance. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -30,7 +31,7 @@ const ( pkName = "rule_disable_pkey" ) -var mig0028AlterRuleDisablePKAndIndex = Migration{ +var mig0028AlterRuleDisablePKAndIndex = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if driver == types.DBDriverPostgres { diff --git a/migration/mig_0029_alter_cluster_rule_toggle_user_id.go b/migration/ocpmigrations/mig_0029_alter_cluster_rule_toggle_user_id.go similarity index 90% rename from migration/mig_0029_alter_cluster_rule_toggle_user_id.go rename to migration/ocpmigrations/mig_0029_alter_cluster_rule_toggle_user_id.go index 6daa75d6f..c16308cb4 100644 --- a/migration/mig_0029_alter_cluster_rule_toggle_user_id.go +++ b/migration/ocpmigrations/mig_0029_alter_cluster_rule_toggle_user_id.go @@ -17,16 +17,17 @@ // in the constraint(s), so we can remove the column without needing to // alter it. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -var mig0029DropClusterRuleToggleUserIDColumn = Migration{ +var mig0029DropClusterRuleToggleUserIDColumn = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if driver == types.DBDriverPostgres { dropColumnQuery := fmt.Sprintf(alterTableDropColumnQuery, clusterRuleToggleTable, userIDColumn) diff --git a/migration/mig_0030_alter_rule_disable_drop_user_id.go b/migration/ocpmigrations/mig_0030_alter_rule_disable_drop_user_id.go similarity index 90% rename from migration/mig_0030_alter_rule_disable_drop_user_id.go rename to migration/ocpmigrations/mig_0030_alter_rule_disable_drop_user_id.go index 12f259b84..31b0d7f05 100644 --- a/migration/mig_0030_alter_rule_disable_drop_user_id.go +++ b/migration/ocpmigrations/mig_0030_alter_rule_disable_drop_user_id.go @@ -17,12 +17,13 @@ // in the constraint(s), so we can remove the column without needing to // alter it. 
-package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -30,7 +31,7 @@ const ( ruleDisableTable = "rule_disable" ) -var mig0030DropRuleDisableUserIDColumn = Migration{ +var mig0030DropRuleDisableUserIDColumn = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if driver == types.DBDriverPostgres { dropColumnQuery := fmt.Sprintf(alterTableDropColumnQuery, ruleDisableTable, userIDColumn) diff --git a/migration/mig_0031_alter_constraint_drop_user_id_advisor_ratings.go b/migration/ocpmigrations/mig_0031_alter_constraint_drop_user_id_advisor_ratings.go similarity index 95% rename from migration/mig_0031_alter_constraint_drop_user_id_advisor_ratings.go rename to migration/ocpmigrations/mig_0031_alter_constraint_drop_user_id_advisor_ratings.go index e1895198a..068bb53fa 100644 --- a/migration/mig_0031_alter_constraint_drop_user_id_advisor_ratings.go +++ b/migration/ocpmigrations/mig_0031_alter_constraint_drop_user_id_advisor_ratings.go @@ -18,12 +18,13 @@ // duplicate records per rule (or per cluster) per organization. // Organization IDs were added and populated in previous migrations. -package migration +package ocpmigrations import ( "database/sql" "fmt" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -39,7 +40,7 @@ var migrationStep = alterConstraintStep{ newConstraint: "(org_id, rule_fqdn, error_key)", } -var mig0031AlterConstraintDropUserAdvisorRatings = Migration{ +var mig0031AlterConstraintDropUserAdvisorRatings = migration.Migration{ StepUp: func(tx *sql.Tx, driver types.DBDriver) error { if driver == types.DBDriverPostgres { // there are 27 rows in the prod table, all made by our QE org. 
it's not worth coming diff --git a/migration/migrations.go b/migration/ocpmigrations/ocp_migrations.go similarity index 52% rename from migration/migrations.go rename to migration/ocpmigrations/ocp_migrations.go index 6867cb19a..c5f58fd11 100644 --- a/migration/migrations.go +++ b/migration/ocpmigrations/ocp_migrations.go @@ -1,22 +1,22 @@ -// Copyright 2020 Red Hat, Inc -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +package ocpmigrations -package migration +import "github.com/RedHatInsights/insights-results-aggregator/migration" -// migrations is a list of migrations that, when applied in their order, -// create the most recent version of the database from scratch. 
-var migrations = []Migration{ +const ( + ruleErrorKeyTable = "rule_error_key" + clusterRuleUserFeedbackTable = "cluster_rule_user_feedback" + clusterReportTable = "report" + clusterRuleToggleTable = "cluster_rule_toggle" + clusterUserRuleDisableFeedbackTable = "cluster_user_rule_disable_feedback" + alterTableDropColumnQuery = "ALTER TABLE %v DROP COLUMN IF EXISTS %v" + alterTableAddVarcharColumn = "ALTER TABLE %v ADD COLUMN %v VARCHAR NOT NULL DEFAULT '-1'" + alterTableDropPK = "ALTER TABLE %v DROP CONSTRAINT IF EXISTS %v_pkey" + alterTableAddPK = "ALTER TABLE %v ADD CONSTRAINT %v_pkey PRIMARY KEY %v" + userIDColumn = "user_id" +) + +// UsableOCPMigrations contains all OCP recommendation related migrations +var UsableOCPMigrations = []migration.Migration{ mig0001CreateReport, mig0002CreateRuleContent, mig0003CreateClusterRuleUserFeedback, diff --git a/pr_check.sh b/pr_check.sh index 773d60f97..8f6b2a803 100755 --- a/pr_check.sh +++ b/pr_check.sh @@ -21,12 +21,17 @@ set -exv # -------------------------------------------- APP_NAME="ccx-data-pipeline" # name of app-sre "application" folder this component lives in REF_ENV="insights-production" -COMPONENT_NAME="ccx-insights-results" # name of app-sre "resourceTemplate" in deploy.yaml for this component +# NOTE: insights-results-aggregator contains deployment for multiple services +# for pull requests we need latest git PR version of these components to be +# deployed to ephemeral env and overriding resource template --set-template-ref. +# Using multiple components name in COMPONENT_NAME forces bonfire to use the +# git version of clowdapp.yaml(or any other) file from the pull request. 
+COMPONENT_NAME="ccx-insights-results ccx-redis dvo-writer" # name of app-sre "resourceTemplate" in deploy.yaml for this component IMAGE="quay.io/cloudservices/insights-results-aggregator" -COMPONENTS="ccx-data-pipeline ccx-insights-results insights-content-service insights-results-smart-proxy ocp-advisor-frontend ccx-mock-ams" # space-separated list of components to laod +COMPONENTS="ccx-data-pipeline ccx-insights-results ccx-redis dvo-writer dvo-extractor insights-content-service insights-results-smart-proxy ccx-mock-ams" # space-separated list of components to load COMPONENTS_W_RESOURCES="" # component to keep CACHE_FROM_LATEST_IMAGE="true" -DEPLOY_FRONTENDS="true" # enable for front-end/UI tests +DEPLOY_FRONTENDS="false" export IQE_PLUGINS="ccx" # Run all pipeline tests @@ -37,6 +42,7 @@ export IQE_TEST_IMPORTANCE="" export IQE_CJI_TIMEOUT="30m" export IQE_SELENIUM="false" export IQE_ENV="ephemeral" +export IQE_ENV_VARS="DYNACONF_USER_PROVIDER__rbac_enabled=false" # NOTE: Uncomment to skip pull request integration tests and comment out # the rest of the file. @@ -52,6 +58,8 @@ function deploy_ephemeral() { } function run_smoke_tests() { + # Workaround: cji_smoke_test.sh requires only one component name. Fallback to only one component name. + export COMPONENT_NAME="ccx-insights-results" source $CICD_ROOT/cji_smoke_test.sh source $CICD_ROOT/post_test_results.sh # publish results in Ibutsu } diff --git a/producer/producer.go b/producer/producer.go index 1bc9d35b8..c3f8ff9e8 100644 --- a/producer/producer.go +++ b/producer/producer.go @@ -19,6 +19,8 @@ limitations under the License. 
package producer import ( + "strings" + "github.com/Shopify/sarama" "github.com/rs/zerolog/log" @@ -57,7 +59,7 @@ func New(brokerCfg broker.Configuration) (*KafkaProducer, error) { // needed producer parameter saramaConfig.Producer.Return.Successes = true - producer, err := sarama.NewSyncProducer([]string{brokerCfg.Address}, saramaConfig) + producer, err := sarama.NewSyncProducer(strings.Split(brokerCfg.Addresses, ","), saramaConfig) if err != nil { log.Error().Err(err).Msg("unable to create a new Kafka producer") return nil, err diff --git a/producer/producer_test.go b/producer/producer_test.go index 403daa759..f7c88f2f2 100644 --- a/producer/producer_test.go +++ b/producer/producer_test.go @@ -31,12 +31,11 @@ import ( "github.com/RedHatInsights/insights-results-aggregator/broker" "github.com/RedHatInsights/insights-results-aggregator/producer" ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" - "github.com/RedHatInsights/insights-results-aggregator/types" ) var ( brokerCfg = broker.Configuration{ - Address: "localhost:1234", + Addresses: "localhost:1234", Topic: "consumer-topic", PayloadTrackerTopic: "payload-tracker-topic", DeadLetterQueueTopic: "dlq-topic", @@ -50,7 +49,7 @@ func init() { zerolog.SetGlobalLevel(zerolog.WarnLevel) } -// Test Producer creation with a non accessible Kafka broker +// Test Producer creation with a non-accessible Kafka broker func TestNewProducerBadBroker(t *testing.T) { const expectedErr = "kafka: client has run out of available brokers to talk to (Is your cluster reachable?)" @@ -89,7 +88,7 @@ func TestProducerTrackPayloadEmptyRequestID(t *testing.T) { helpers.FailOnError(t, payloadTrackerProducer.Close()) }() - err := payloadTrackerProducer.TrackPayload(types.RequestID(""), testTimestamp, nil, nil, producer.StatusReceived) + err := payloadTrackerProducer.TrackPayload("", testTimestamp, nil, nil, producer.StatusReceived) assert.NoError(t, err, "payload tracking failed") } @@ -133,7 +132,7 @@ func 
TestProducerNew(t *testing.T) { prod, err := producer.New( broker.Configuration{ - Address: mockBroker.Addr(), + Addresses: mockBroker.Addr(), Topic: brokerCfg.Topic, PayloadTrackerTopic: brokerCfg.PayloadTrackerTopic, Enabled: brokerCfg.Enabled, @@ -152,7 +151,7 @@ func TestDeadLetterProducerNew(t *testing.T) { prod, err := producer.NewDeadLetterProducer( broker.Configuration{ - Address: mockBroker.Addr(), + Addresses: mockBroker.Addr(), Topic: brokerCfg.Topic, PayloadTrackerTopic: brokerCfg.PayloadTrackerTopic, Enabled: brokerCfg.Enabled, @@ -172,7 +171,7 @@ func TestPayloadTrackerProducerNew(t *testing.T) { prod, err := producer.NewPayloadTrackerProducer( broker.Configuration{ - Address: mockBroker.Addr(), + Addresses: mockBroker.Addr(), Topic: brokerCfg.Topic, PayloadTrackerTopic: brokerCfg.PayloadTrackerTopic, Enabled: brokerCfg.Enabled, diff --git a/server.go b/server.go index c7260a9d3..45a69fd0b 100644 --- a/server.go +++ b/server.go @@ -23,6 +23,7 @@ import ( "github.com/RedHatInsights/insights-results-aggregator/conf" "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/server" + "github.com/RedHatInsights/insights-results-aggregator/storage" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -35,36 +36,36 @@ var ( func startServer() error { defer finishServerInstanceInitialization() - dbStorage, err := createStorage() + ocpRecommendationsStorage, dvoRecommendationsStorage, err := createStorage() if err != nil { return err } - defer closeStorage(dbStorage) + defer closeStorage(ocpRecommendationsStorage) + defer closeStorage(dvoRecommendationsStorage) serverCfg := conf.GetServerConfiguration() - serverInstance = server.New(serverCfg, dbStorage) + serverInstance = server.New(serverCfg, ocpRecommendationsStorage, dvoRecommendationsStorage) // fill-in additional info used by /info endpoint handler fillInInfoParams(serverInstance.InfoParams) // try to retrieve the actual DB 
migration version // and add it into the `params` map - log.Info().Msg("Setting DB version for /info endpoint") - if conf.GetStorageConfiguration().Type == types.SQLStorage { - // migration and DB versioning is now supported for SQL - // databases only - currentVersion, err := migration.GetDBVersion(dbStorage.GetConnection()) - if err != nil { - const msg = "Unable to retrieve DB migration version" - log.Error().Err(err).Msg(msg) - serverInstance.InfoParams["DB_version"] = msg - } else { - serverInstance.InfoParams["DB_version"] = strconv.Itoa(int(currentVersion)) - } - } else { - serverInstance.InfoParams["DB_version"] = "not supported" - } + log.Info().Msg("Setting OCP DB version for /info endpoint") + setDBVersion( + serverInstance, + conf.GetOCPRecommendationsStorageConfiguration(), + ocpRecommendationsStorage, + "OCP_DB_version", + ) + log.Info().Msg("Setting DVO DB version for /info endpoint") + setDBVersion( + serverInstance, + conf.GetDVORecommendationsStorageConfiguration(), + dvoRecommendationsStorage, + "DVO_DB_version", + ) err = serverInstance.Start(finishServerInstanceInitialization) if err != nil { @@ -75,6 +76,22 @@ func startServer() error { return nil } +func setDBVersion(s *server.HTTPServer, storageConf storage.Configuration, storage storage.Storage, key string) { + if storageConf.Type != types.SQLStorage { + // migration and DB versioning is now supported for SQL databases only + s.InfoParams[key] = "not supported" + return + } + currentVersion, err := migration.GetDBVersion(storage.GetConnection(), storage.GetDBSchema()) + if err != nil { + const msg = "Unable to retrieve DB migration version" + log.Error().Err(err).Msg(msg) + serverInstance.InfoParams[key] = msg + } else { + serverInstance.InfoParams[key] = strconv.Itoa(int(currentVersion)) + } +} + func stopServer() error { waitForServerToStartOrFail() diff --git a/server/auth.go b/server/auth.go index f3f966235..120355bbb 100644 --- a/server/auth.go +++ b/server/auth.go @@ -1,5 +1,3 @@ -// 
Auth implementation based on JWT - /* Copyright © 2019, 2020, 2021, 2022 Red Hat, Inc. @@ -22,9 +20,9 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "net/http" - "strings" "github.com/RedHatInsights/insights-operator-utils/collections" types "github.com/RedHatInsights/insights-results-types" @@ -42,9 +40,6 @@ type Identity = types.Identity // Token is x-rh-identity struct type Token = types.Token -// JWTPayload is structure that contain data from parsed JWT token -type JWTPayload = types.JWTPayload - // Authentication middleware for checking auth rights func (server *HTTPServer) Authentication(next http.Handler, noAuthURLs []string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -74,25 +69,8 @@ func (server *HTTPServer) Authentication(next http.Handler, noAuthURLs []string) } tk := &types.Token{} - // if we took JWT token, it has different structure than x-rh-identity - if server.Config.AuthType == "jwt" { - jwtPayload := &types.JWTPayload{} - err = json.Unmarshal(decoded, jwtPayload) - if err != nil { - // malformed token, returns with HTTP code 403 as usual - log.Error().Err(err).Msg(malformedTokenMessage) - handleServerError(w, &UnauthorizedError{ErrString: malformedTokenMessage}) - return - } - // Map JWT token to inner token - tk.Identity = types.Identity{ - AccountNumber: jwtPayload.AccountNumber, - OrgID: jwtPayload.OrgID, - User: types.User{ - UserID: jwtPayload.UserID, - }, - } - } else { + + if server.Config.AuthType == "xrh" { // auth type is xrh (x-rh-identity header) err = json.Unmarshal(decoded, tk) if err != nil { @@ -101,6 +79,11 @@ func (server *HTTPServer) Authentication(next http.Handler, noAuthURLs []string) handleServerError(w, &UnauthorizedError{ErrString: malformedTokenMessage}) return } + } else { + err := errors.New("unknown auth type") + log.Error().Err(err).Send() + handleServerError(w, err) + return } if tk.Identity.OrgID == 0 { @@ -144,22 +127,7 @@ func (server 
*HTTPServer) GetCurrentUserID(request *http.Request) (types.UserID, } func (server *HTTPServer) getAuthTokenHeader(_ http.ResponseWriter, r *http.Request) (string, error) { - var tokenHeader string - // In case of testing on local machine we don't take x-rh-identity header, but instead Authorization with JWT token in it - if server.Config.AuthType == "jwt" { - tokenHeader = r.Header.Get("Authorization") // Grab the token from the header - splitted := strings.Split(tokenHeader, " ") // The token normally comes in format `Bearer {token-body}`, we check if the retrieved token matched this requirement - if len(splitted) != 2 { - const message = "Invalid/Malformed auth token" - return "", &UnauthorizedError{ErrString: message} - } - - // Here we take JWT token which include 3 parts, we need only second one - splitted = strings.Split(splitted[1], ".") - tokenHeader = splitted[1] - } else { - tokenHeader = r.Header.Get("x-rh-identity") // Grab the token from the header - } + tokenHeader := r.Header.Get("x-rh-identity") // Grab the token from the header if tokenHeader == "" { const message = "Missing auth token" diff --git a/server/auth_test.go b/server/auth_test.go index d44a13ecb..56990e12e 100644 --- a/server/auth_test.go +++ b/server/auth_test.go @@ -27,6 +27,9 @@ import ( types "github.com/RedHatInsights/insights-results-types" ) +// goodXRHAuthToken contains dummy data +const goodXRHAuthToken = `eyJpZGVudGl0eSI6eyJhY2NvdW50X251bWJlciI6IjEiLCJvcmdfaWQiOiIxIiwidHlwZSI6IlVzZXIiLCJ1c2VyIjp7InVzZXJuYW1lIjoiamRvZSIsInVzZXJfaWQiOiIxIiwiZW1haWwiOiJqZG9lQGFjbWUuY29tIiwiZmlyc3RfbmFtZSI6IkpvaG4iLCJsYXN0X25hbWUiOiJEb2UiLCJpc19hY3RpdmUiOnRydWUsImlzX29yZ19hZG1pbiI6ZmFsc2UsImlzX2ludGVybmFsIjpmYWxzZSwibG9jYWxlIjoiZW5fVVMifSwiaW50ZXJuYWwiOnsib3JnX2lkIjoiMSIsImF1dGhfdHlwZSI6ImJhc2ljLWF1dGgiLCJhdXRoX3RpbWUiOjYzMDB9fX0K` + var configAuth = server.Configuration{ Address: ":8080", APIPrefix: "/api/test/", @@ -111,3 +114,24 @@ func TestBadOrganizationID(t *testing.T) { Body: body, }) } + 
+// TestUnsupportedAuthType checks how that only "xrh" auth type is supported +func TestUnsupportedAuthType(t *testing.T) { + config := server.Configuration{ + Address: ":8080", + APIPrefix: "/api/test/", + Debug: false, + Auth: true, + AuthType: "jwt", // unsupported auth type + MaximumFeedbackMessageLength: 255, + } + + helpers.AssertAPIRequest(t, nil, &config, &helpers.APIRequest{ + Method: http.MethodGet, + Endpoint: server.ClustersForOrganizationEndpoint, + EndpointArgs: []interface{}{1}, + XRHIdentity: goodXRHAuthToken, + }, &helpers.APIResponse{ + StatusCode: http.StatusInternalServerError, + }) +} diff --git a/server/dvo_handlers.go b/server/dvo_handlers.go new file mode 100644 index 000000000..fce05a130 --- /dev/null +++ b/server/dvo_handlers.go @@ -0,0 +1,321 @@ +/* +Copyright © 2024 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "regexp" + "strings" + "time" + + "github.com/RedHatInsights/insights-operator-utils/generators" + httputils "github.com/RedHatInsights/insights-operator-utils/http" + "github.com/RedHatInsights/insights-operator-utils/responses" + "github.com/RedHatInsights/insights-results-aggregator/types" + "github.com/rs/zerolog/log" +) + +const ( + namespaceIDParam = "namespace" + // RecommendationSuffix is used to strip a suffix from rule ID + RecommendationSuffix = ".recommendation" +) + +// Cluster structure contains cluster UUID and cluster name +type Cluster struct { + UUID string `json:"uuid"` + DisplayName string `json:"display_name"` +} + +// Namespace structure contains basic information about namespace +type Namespace struct { + UUID string `json:"uuid"` + Name string `json:"name"` +} + +// Metadata structure contains basic information about workload metadata +type Metadata struct { + Recommendations int `json:"recommendations"` + Objects int `json:"objects"` + ReportedAt string `json:"reported_at"` + LastCheckedAt string `json:"last_checked_at"` + HighestSeverity int `json:"highest_severity"` + HitsBySeverity map[int]int `json:"hits_by_severity"` +} + +// WorkloadsForNamespace structure represents a single entry of the namespace list with some aggregations +type WorkloadsForNamespace struct { + Cluster Cluster `json:"cluster"` + Namespace Namespace `json:"namespace"` + Metadata Metadata `json:"metadata"` + RecommendationsHitCount map[string]int `json:"recommendations_hit_count"` +} + +// WorkloadsForCluster structure represents workload for one selected cluster +type WorkloadsForCluster struct { + Status string `json:"status"` + Cluster Cluster `json:"cluster"` + Namespace Namespace `json:"namespace"` + Metadata Metadata `json:"metadata"` + Recommendations []DVORecommendation `json:"recommendations"` +} + +// DVORecommendation structure represents one DVO-related recommendation 
+type DVORecommendation struct { + Check string `json:"check"` + Details string `json:"details"` + Resolution string `json:"resolution"` + Modified string `json:"modified"` + MoreInfo string `json:"more_info"` + TemplateData map[string]interface{} `json:"extra_data"` + Objects []DVOObject `json:"objects"` +} + +// DVOObject structure +type DVOObject struct { + Kind string `json:"kind"` + UID string `json:"uid"` +} + +// readNamespace retrieves namespace UUID from request +// if it's not possible, it writes http error to the writer and returns error +func readNamespace(writer http.ResponseWriter, request *http.Request) ( + namespace string, err error, +) { + namespaceID, err := httputils.GetRouterParam(request, namespaceIDParam) + if err != nil { + handleServerError(writer, err) + return + } + + validatedNamespaceID, err := validateNamespaceID(namespaceID) + if err != nil { + err = &RouterParsingError{ + ParamName: namespaceIDParam, + ParamValue: namespaceID, + ErrString: err.Error(), + } + handleServerError(writer, err) + return + } + + return validatedNamespaceID, nil +} + +func validateNamespaceID(namespace string) (string, error) { + IDValidator := regexp.MustCompile(`^.{1,256}$`) + + if !IDValidator.MatchString(namespace) { + message := fmt.Sprintf("invalid namespace ID: '%s'", namespace) + err := errors.New(message) + log.Error().Err(err).Msg(message) + return "", err + } + + return namespace, nil +} + +// getWorkloads retrieves all namespaces and workloads for given organization +func (server *HTTPServer) getWorkloads(writer http.ResponseWriter, request *http.Request) { + tStart := time.Now() + + // extract org_id from URL + orgID, ok := readOrgID(writer, request) + if !ok { + // everything has been handled + return + } + log.Debug().Int(orgIDStr, int(orgID)).Msg("getWorkloads") + + workloads, err := server.StorageDvo.ReadWorkloadsForOrganization(orgID) + if err != nil { + log.Error().Err(err).Msg("Errors retrieving DVO workload recommendations from storage") 
+ handleServerError(writer, err) + return + } + + processedWorkloads := server.processDVOWorkloads(workloads) + + log.Debug().Uint32(orgIDStr, uint32(orgID)).Msgf( + "getWorkloads took %s", time.Since(tStart), + ) + err = responses.SendOK(writer, responses.BuildOkResponseWithData("workloads", processedWorkloads)) + if err != nil { + log.Error().Err(err).Msg(responseDataError) + } +} + +func (server *HTTPServer) processDVOWorkloads(workloads []types.DVOReport) ( + processedWorkloads []WorkloadsForNamespace, +) { + for _, workload := range workloads { + processedWorkloads = append(processedWorkloads, WorkloadsForNamespace{ + Cluster: Cluster{ + UUID: workload.ClusterID, + }, + Namespace: Namespace{ + UUID: workload.NamespaceID, + Name: workload.NamespaceName, + }, + Metadata: Metadata{ + Recommendations: int(workload.Recommendations), + Objects: int(workload.Objects), + ReportedAt: string(workload.ReportedAt), + LastCheckedAt: string(workload.LastCheckedAt), + }, + // TODO: fill RecommendationsHitCount map efficiently instead of processing the report again every time + }) + } + + return +} + +// getWorkloadsForNamespace retrieves data about a single namespace within a cluster +func (server *HTTPServer) getWorkloadsForNamespace(writer http.ResponseWriter, request *http.Request) { + tStart := time.Now() + + orgID, ok := readOrgID(writer, request) + if !ok { + // everything has been handled + return + } + + clusterName, successful := readClusterName(writer, request) + if !successful { + // everything has been handled already + return + } + + namespaceID, err := readNamespace(writer, request) + if err != nil { + return + } + + log.Debug().Int(orgIDStr, int(orgID)).Str("namespaceID", namespaceID).Msgf("getWorkloadsForNamespace cluster %v", clusterName) + + workload, err := server.StorageDvo.ReadWorkloadsForClusterAndNamespace(orgID, clusterName, namespaceID) + if err != nil { + log.Error().Err(err).Msg("Errors retrieving DVO workload recommendations from storage") + 
handleServerError(writer, err) + return + } + + processedWorkload := server.ProcessSingleDVONamespace(workload) + + log.Info().Uint32(orgIDStr, uint32(orgID)).Msgf( + "getWorkloadsForNamespace took %s", time.Since(tStart), + ) + err = responses.SendOK(writer, responses.BuildOkResponseWithData("workloads", processedWorkload)) + if err != nil { + log.Error().Err(err).Msg(responseDataError) + } +} + +// ProcessSingleDVONamespace processes a report, filters out mismatching namespaces, returns processed results +func (server *HTTPServer) ProcessSingleDVONamespace(workload types.DVOReport) ( + processedWorkloads WorkloadsForCluster, +) { + processedWorkloads = WorkloadsForCluster{ + Cluster: Cluster{ + UUID: workload.ClusterID, + }, + Namespace: Namespace{ + UUID: workload.NamespaceID, + Name: workload.NamespaceName, + }, + Metadata: Metadata{ + Recommendations: int(workload.Recommendations), + Objects: int(workload.Objects), + ReportedAt: string(workload.ReportedAt), + LastCheckedAt: string(workload.LastCheckedAt), + }, + Recommendations: []DVORecommendation{}, + } + + var report string + + switch string([]rune(workload.Report)[:1]) { + case `"`: + // we're dealing with a quoted `"{\"system\":{}}"` string + // unmarshalling into a string first before unmarshalling into a struct will remove the leading/trailing quotes + // and also take care of the escaped `\"` quotes and replaces them with valid `"`, producing a valid JSON + err := json.Unmarshal(json.RawMessage(workload.Report), &report) + if err != nil { + log.Error().Err(err).Msgf("report has unknown structure: [%v]", string([]rune(workload.Report)[:100])) + } + case `{`: + // we're dealing with either a valid JSON `{"system":{}}` or a string with escaped + // quotes `{\"system\":{}}`. 
Stripping escape chars `\` if any, produces a valid JSON + report = strings.Replace(workload.Report, `\`, "", -1) + default: + log.Error().Msgf("report has unknown structure: [%v]", string([]rune(workload.Report)[:100])) + return + } + + var dvoReport types.DVOMetrics + err := json.Unmarshal([]byte(report), &dvoReport) + if err != nil { + log.Error().Err(err).Msgf("error unmarshalling full report: [%v]", string([]rune(report)[:100])) + log.Info().Msgf("report without escape %v", string([]rune(workload.Report)[:100])) + return + } + + for _, recommendation := range dvoReport.WorkloadRecommendations { + filteredObjects := make([]DVOObject, 0) + for i := range recommendation.Workloads { + object := &recommendation.Workloads[i] + + // filter out other namespaces + if object.NamespaceUID != processedWorkloads.Namespace.UUID { + continue + } + filteredObjects = append(filteredObjects, DVOObject{ + Kind: object.Kind, + UID: object.UID, + }) + } + + // because the whole report contains a list of recommendations and each rec. 
contains + // a list of objects + namespaces, it can happen that upon filtering the objects to get rid + // of namespaces that weren't requested, we can end up with 0 hitting objects in that namespace + if len(filteredObjects) == 0 { + continue + } + + // recommendation.ResponseID doesn't contain the full rule ID, so smart-proxy was unable to retrieve content, we need to build it + compositeRuleID, err := generators.GenerateCompositeRuleID( + // for some unknown reason, there's a `.recommendation` suffix for each rule hit instead of the usual .report + types.RuleFQDN(strings.TrimSuffix(recommendation.Component, RecommendationSuffix)), + types.ErrorKey(recommendation.Key), + ) + if err != nil { + log.Error().Err(err).Msg("error generating composite rule ID for rule") + continue + } + + processedWorkloads.Recommendations = append(processedWorkloads.Recommendations, DVORecommendation{ + Check: string(compositeRuleID), + Objects: filteredObjects, + TemplateData: recommendation.Details, + }) + } + + return +} diff --git a/server/dvo_handlers_test.go b/server/dvo_handlers_test.go new file mode 100644 index 000000000..c3390d8c2 --- /dev/null +++ b/server/dvo_handlers_test.go @@ -0,0 +1,190 @@ +/* +Copyright © 2020, 2021, 2022, 2023, 2024 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/RedHatInsights/insights-results-aggregator-data/testdata" + "github.com/RedHatInsights/insights-results-aggregator/server" + "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" + "github.com/RedHatInsights/insights-results-aggregator/types" + "github.com/stretchr/testify/assert" +) + +const ( + /* + test reports have following structure: + { + ... + "workload_recommendations": [ + { + "response_id": "unset_requirements|DVO_UNSET_REQUIREMENTS", + ... + "workloads": [ + { + "namespace": "namespace-name-A", + ... + "uid": "193a2099-1234-5678-916a-d570c9aac158" + }, + { + "namespace": "namespace-name-A", + ... + "uid": "193a2099-0000-1111-916a-d570c9aac158" + } + ] + }, + { + "response_id": "excluded_pod|EXCLUDED_POD", + ... + "workloads": [ + { + "namespace": "namespace-name-B", + ... + "uid": "12345678-1234-5678-916a-d570c9aac158" + } + ] + } + ] + } + + */ + currentDvoReportFromDB = 
`"{\"system\":{\"metadata\":{},\"hostname\":null},\"fingerprints\":[],\"version\":1,\"analysis_metadata\":{},\"workload_recommendations\":[{\"response_id\":\"unset_requirements|DVO_UNSET_REQUIREMENTS\",\"component\":\"ccx_rules_ocp.external.dvo.unset_requirements.recommendation\",\"key\":\"DVO_UNSET_REQUIREMENTS\",\"details\":{\"check_name\":\"\",\"check_url\":\"\",\"samples\":[{\"namespace_uid\":\"NAMESPACE-UID-A\",\"kind\":\"DaemonSet\",\"uid\":\"193a2099-1234-5678-916a-d570c9aac158\"}]},\"tags\":[],\"links\":{\"jira\":[\"https://issues.redhat.com/browse/AN_ISSUE\"],\"product_documentation\":[]},\"workloads\":[{\"namespace\":\"namespace-name-A\",\"namespace_uid\":\"NAMESPACE-UID-A\",\"kind\":\"DaemonSet\",\"name\":\"test-name-0099\",\"uid\":\"193a2099-1234-5678-916a-d570c9aac158\"},{\"namespace\":\"namespace-name-A\",\"namespace_uid\":\"NAMESPACE-UID-A\",\"kind\":\"Pod\",\"name\":\"test-name-0001\",\"uid\":\"193a2099-0000-1111-916a-d570c9aac158\"}]},{\"response_id\":\"excluded_pod|EXCLUDED_POD\",\"component\":\"ccx_rules_ocp.external.dvo.excluded_pod.recommendation\",\"key\":\"EXCLUDED_POD\",\"details\":{\"check_name\":\"\",\"check_url\":\"\",\"samples\":[{\"namespace_uid\":\"NAMESPACE-UID-B\",\"kind\":\"DaemonSet\",\"uid\":\"12345678-1234-5678-916a-d570c9aac158\"}]},\"tags\":[],\"links\":{\"jira\":[\"https://issues.redhat.com/browse/AN_ISSUE\"],\"product_documentation\":[]},\"workloads\":[{\"namespace\":\"namespace-name-B\",\"namespace_uid\":\"NAMESPACE-UID-B\",\"kind\":\"DaemonSet\",\"name\":\"test-name-1234\",\"uid\":\"12345678-1234-5678-916a-d570c9aac158\"}]}]}"` + // fixedDvoReportFromDB is what the string inside the report column should look like (after we fix the encoding issues) + fixedDvoReportFromDB = 
`{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"unset_requirements|DVO_UNSET_REQUIREMENTS","component":"ccx_rules_ocp.external.dvo.unset_requirements.recommendation","key":"DVO_UNSET_REQUIREMENTS","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"193a2099-1234-5678-916a-d570c9aac158"},{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"Pod","name":"test-name-0001","uid":"193a2099-0000-1111-916a-d570c9aac158"}]},{"response_id":"excluded_pod|EXCLUDED_POD","component":"ccx_rules_ocp.external.dvo.excluded_pod.recommendation","key":"EXCLUDED_POD","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","uid":"12345678-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]}]}` + objectBUID = `12345678-1234-5678-916a-d570c9aac158` + namespaceAID = "NAMESPACE-UID-A" + namespaceBID = "NAMESPACE-UID-B" +) + +// TestProcessSingleDVONamespace_MustProcessEscapedString tests the behavior of ProcessSingleDVONamespace with the current +// escaped JSON string, the whole string is also wrapped in quotation marks +func TestProcessSingleDVONamespace_MustProcessEscapedString(t *testing.T) { + testServer := server.New(helpers.DefaultServerConfig, nil, nil) + + now := types.Timestamp(time.Now().UTC().Format(time.RFC3339)) + + dvoReport := types.DVOReport{ + 
OrgID: "1", + NamespaceID: namespaceBID, + NamespaceName: "namespace-name-B", + ClusterID: string(testdata.ClusterName), + Recommendations: 1, + Report: currentDvoReportFromDB, + Objects: 1, + ReportedAt: now, + LastCheckedAt: now, + } + + processedWorkload := testServer.ProcessSingleDVONamespace(dvoReport) + + assert.Equal(t, 1, len(processedWorkload.Recommendations)) + assert.Equal(t, 1, len(processedWorkload.Recommendations[0].Objects)) + assert.Equal(t, objectBUID, processedWorkload.Recommendations[0].Objects[0].UID) + assert.Equal(t, namespaceBID, processedWorkload.Namespace.UUID) + assert.Equal(t, 1, processedWorkload.Metadata.Objects) + assert.Equal(t, 1, processedWorkload.Metadata.Recommendations) + + samples, err := json.Marshal(processedWorkload.Recommendations[0].TemplateData["samples"]) + assert.NoError(t, err) + assert.GreaterOrEqual(t, len(string(samples)), 1) +} + +// TestProcessSingleDVONamespace_MustProcessCorrectString tests the behavior of the ProcessSingleDVONamespace with a +// correct string (no double escapes, leading, trailing quotes). This test demonstrates that we can fix the encoding +// without affecting the API response at all, as the function simply doesn't strip anything from the strings. 
+func TestProcessSingleDVONamespace_MustProcessCorrectString(t *testing.T) { + testServer := server.New(helpers.DefaultServerConfig, nil, nil) + + now := types.Timestamp(time.Now().UTC().Format(time.RFC3339)) + + dvoReport := types.DVOReport{ + OrgID: "1", + NamespaceID: namespaceBID, + NamespaceName: "namespace-name-B", + ClusterID: "193a2099-1234-5678-916a-d570c9aac158", + Recommendations: 1, + Report: fixedDvoReportFromDB, + Objects: 1, + ReportedAt: now, + LastCheckedAt: now, + } + + processedWorkload := testServer.ProcessSingleDVONamespace(dvoReport) + + assert.Equal(t, 1, len(processedWorkload.Recommendations)) + assert.Equal(t, 1, len(processedWorkload.Recommendations[0].Objects)) + assert.Equal(t, objectBUID, processedWorkload.Recommendations[0].Objects[0].UID) + assert.Equal(t, namespaceBID, processedWorkload.Namespace.UUID) + assert.Equal(t, 1, processedWorkload.Metadata.Objects) + assert.Equal(t, 1, processedWorkload.Metadata.Recommendations) + + samples, err := json.Marshal(processedWorkload.Recommendations[0].TemplateData["samples"]) + assert.NoError(t, err) + assert.GreaterOrEqual(t, len(string(samples)), 1) +} + +// TestProcessSingleDVONamespace_MustFilterZeroObjects_CCXDEV_12589_Reproducer +// tests the behavior of the ProcessSingleDVONamespace +// to filter out recommendations that have 0 objects/workloads for that particular namespace. +// Since we process the report every time, we need to filter out objects+namespaces that don't match +// the requested one. And since we can end up with 0 objects for that rule_ID + namespace after the filtering, +// we mustn't show this recommendation in the API, as it has no rule hits in reality. 
+func TestProcessSingleDVONamespace_MustFilterZeroObjects_CCXDEV_12589_Reproducer(t *testing.T) { + testServer := server.New(helpers.DefaultServerConfig, nil, nil) + + now := types.Timestamp(time.Now().UTC().Format(time.RFC3339)) + + dvoReport := types.DVOReport{ + OrgID: "1", + NamespaceID: namespaceAID, + NamespaceName: "namespace-name-A", + ClusterID: string(testdata.ClusterName), + Recommendations: 1, + Report: fixedDvoReportFromDB, + Objects: 2, + ReportedAt: now, + LastCheckedAt: now, + } + + processedWorkload := testServer.ProcessSingleDVONamespace(dvoReport) + + assert.Equal(t, 1, len(processedWorkload.Recommendations)) // <-- this would be 2 without CCXDEV-12589 fix + assert.Equal(t, 2, len(processedWorkload.Recommendations[0].Objects)) + expectedObjects := []server.DVOObject{ + { + UID: "193a2099-0000-1111-916a-d570c9aac158", + Kind: "Pod", + }, + { + UID: "193a2099-1234-5678-916a-d570c9aac158", + Kind: "DaemonSet", + }, + } + assert.ElementsMatch(t, expectedObjects, processedWorkload.Recommendations[0].Objects) + assert.Equal(t, namespaceAID, processedWorkload.Namespace.UUID) + + // check correct metadata as well + assert.Equal(t, 2, processedWorkload.Metadata.Objects) + assert.Equal(t, 1, processedWorkload.Metadata.Recommendations) + + samples, err := json.Marshal(processedWorkload.Recommendations[0].TemplateData["samples"]) + assert.NoError(t, err) + assert.GreaterOrEqual(t, len(string(samples)), 1) +} diff --git a/server/endpoints.go b/server/endpoints.go index e8f4970ac..83b9cb342 100644 --- a/server/endpoints.go +++ b/server/endpoints.go @@ -90,6 +90,11 @@ const ( // ClustersRecommendationsListEndpoint receives a list of clusters in POST body and returns a list of clusters with lists of hitting recommendations ClustersRecommendationsListEndpoint = "clusters/organizations/{org_id}/users/{user_id}/recommendations" + // DVOWorkloadRecommendations returns a list of cluster + namespace workloads for given organization ID. 
+ DVOWorkloadRecommendations = "organization/{org_id}/workloads" + // DVOWorkloadRecommendationsSingleNamespace returns workloads for a single cluster + namespace ID. + DVOWorkloadRecommendationsSingleNamespace = "organization/{org_id}/namespace/{namespace}/cluster/{cluster}/workloads" + // Rating accepts a list of ratings in the request body and store them in the database for the given user Rating = "rules/organizations/{org_id}/rating" // GetRating retrieves the rating for a specific rule and user @@ -181,4 +186,7 @@ func (server *HTTPServer) addRuleEnableDisableEndpointsToRouter(router *mux.Rout func (server *HTTPServer) addInsightsAdvisorEndpointsToRouter(router *mux.Router, apiPrefix string) { router.HandleFunc(apiPrefix+RecommendationsListEndpoint, server.getRecommendations).Methods(http.MethodPost, http.MethodOptions) router.HandleFunc(apiPrefix+ClustersRecommendationsListEndpoint, server.getClustersRecommendationsList).Methods(http.MethodPost, http.MethodOptions) + + router.HandleFunc(apiPrefix+DVOWorkloadRecommendations, server.getWorkloads).Methods(http.MethodGet) + router.HandleFunc(apiPrefix+DVOWorkloadRecommendationsSingleNamespace, server.getWorkloadsForNamespace).Methods(http.MethodGet) } diff --git a/server/rating_test.go b/server/rating_test.go index 2149f97f1..9abc64008 100644 --- a/server/rating_test.go +++ b/server/rating_test.go @@ -77,7 +77,7 @@ func TestHTTPServer_getRuleRating_NoRating(t *testing.T) { } func TestHTTPServer_getRuleRating_OK(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.RateOnRule( @@ -99,7 +99,7 @@ func TestHTTPServer_getRuleRating_OK(t *testing.T) { } func TestHTTPServer_getRuleRating_MultipleOK(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.RateOnRule( diff --git 
a/server/report_benchmarks_test.go b/server/report_benchmarks_test.go index f11301f88..58fde7bb9 100644 --- a/server/report_benchmarks_test.go +++ b/server/report_benchmarks_test.go @@ -44,17 +44,15 @@ func BenchmarkHTTPServer_ReadReportForCluster(b *testing.B) { type testCase struct { storageName string - storageProvider func(testing.TB, bool) (storage.Storage, func()) + storageProvider func(testing.TB, bool) (storage.OCPRecommendationsStorage, func()) N uint } var testCases []testCase for _, n := range []uint{1, 10, 100, 1000} { - for storageName, storageProvider := range map[string]func(testing.TB, bool) (storage.Storage, func()){ - "SQLiteMemory": helpers.MustGetSQLiteMemoryStorage, - "SQLiteFile": helpers.MustGetSQLiteFileStorage, - "Postgres": helpers.MustGetPostgresStorage, + for storageName, storageProvider := range map[string]func(testing.TB, bool) (storage.OCPRecommendationsStorage, func()){ + "Postgres": helpers.MustGetPostgresStorage, } { testCases = append(testCases, testCase{ storageName, @@ -80,11 +78,11 @@ func BenchmarkHTTPServer_ReadReportForCluster(b *testing.B) { func benchmarkHTTPServerReadReportForCluster( b *testing.B, - mockStorage storage.Storage, + mockStorage storage.OCPRecommendationsStorage, testReportDataItems []testReportData, n uint, ) { - testServer := server.New(helpers.DefaultServerConfig, mockStorage) + testServer := server.New(helpers.DefaultServerConfig, mockStorage, nil) b.ResetTimer() for benchIndex := 0; benchIndex < b.N; benchIndex++ { @@ -128,7 +126,7 @@ type testReportData struct { clusterID types.ClusterName } -func initTestReports(b *testing.B, n uint, mockStorage storage.Storage, reportProvider func() (types.ClusterReport, []types.ReportItem)) []testReportData { +func initTestReports(b *testing.B, n uint, mockStorage storage.OCPRecommendationsStorage, reportProvider func() (types.ClusterReport, []types.ReportItem)) []testReportData { var testReportDataItems []testReportData for i := uint(0); i < n; i++ { diff --git 
a/server/server.go b/server/server.go index fdfa63588..df4b0bb41 100644 --- a/server/server.go +++ b/server/server.go @@ -80,18 +80,22 @@ const ( // HTTPServer in an implementation of Server interface type HTTPServer struct { - Config Configuration - Storage storage.Storage - Serv *http.Server - InfoParams map[string]string + Config Configuration + Storage storage.OCPRecommendationsStorage + StorageDvo storage.DVORecommendationsStorage + Serv *http.Server + InfoParams map[string]string + InfoParamsDVO map[string]string } // New constructs new implementation of Server interface -func New(config Configuration, storage storage.Storage) *HTTPServer { +func New(config Configuration, storage storage.OCPRecommendationsStorage, storageDVO storage.DVORecommendationsStorage) *HTTPServer { return &HTTPServer{ - Config: config, - Storage: storage, - InfoParams: make(map[string]string), + Config: config, + Storage: storage, + StorageDvo: storageDVO, + InfoParams: make(map[string]string), + InfoParamsDVO: make(map[string]string), } } diff --git a/server/server_read_report_metainfo_test.go b/server/server_read_report_metainfo_test.go index 30487ca36..bb7690106 100644 --- a/server/server_read_report_metainfo_test.go +++ b/server/server_read_report_metainfo_test.go @@ -77,7 +77,7 @@ func TestReadNonExistingReportMetainfo(t *testing.T) { } func TestReadExistingEmptyReportMetainfo(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -110,7 +110,7 @@ func TestReadExistingEmptyReportMetainfo(t *testing.T) { } func TestReadReportMetainfoDBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -124,7 +124,7 @@ func TestReadReportMetainfoDBError(t *testing.T) { } func 
TestReadReportMetainfo(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( diff --git a/server/server_read_report_test.go b/server/server_read_report_test.go index d5320b811..8110a3244 100644 --- a/server/server_read_report_test.go +++ b/server/server_read_report_test.go @@ -81,7 +81,7 @@ func TestReadNonExistingReport(t *testing.T) { } func TestHttpServer_readReportForCluster_NoRules(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -118,7 +118,7 @@ func TestHttpServer_readReportForCluster_NoRules(t *testing.T) { } func TestReadReportDBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -132,7 +132,7 @@ func TestReadReportDBError(t *testing.T) { } func TestReadReport(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -175,7 +175,7 @@ func TestReadReport(t *testing.T) { } func TestReadRuleReport(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -213,7 +213,7 @@ func TestReadRuleReport(t *testing.T) { // expecting the rule to be last and disabled, re-enables it and expects regular // response with Rule1 first again func TestReadReportDisableRule(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() now 
:= time.Now() @@ -323,7 +323,7 @@ func TestReadReportDisableRule(t *testing.T) { } func TestReadReport_RuleDisableFeedback(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() now := time.Now() diff --git a/server/server_test.go b/server/server_test.go index e73ffc1f4..0d2825eca 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -88,7 +88,7 @@ func TestListOfClustersForNonExistingOrganization(t *testing.T) { } func TestListOfClustersForOrganizationOK(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -116,7 +116,7 @@ func TestListOfClustersForOrganizationOK(t *testing.T) { // TestListOfClustersForOrganizationDBError expects db error // because the storage is closed before the query func TestListOfClustersForOrganizationDBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -176,7 +176,7 @@ func TestListOfOrganizationsEmpty(t *testing.T) { } func TestListOfOrganizationsOK(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -213,7 +213,7 @@ func TestListOfOrganizationsOK(t *testing.T) { } func TestListOfOrganizationsDBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -226,6 +226,8 @@ func TestListOfOrganizationsDBError(t *testing.T) { } func TestServerStart(t *testing.T) { + mockStorageDVO, closer := 
helpers.MustGetPostgresStorageDVO(t, true) + closer() helpers.RunTestWithTimeout(t, func(t testing.TB) { s := server.New(server.Configuration{ // will use any free port @@ -234,7 +236,7 @@ func TestServerStart(t *testing.T) { Auth: true, Debug: true, MaximumFeedbackMessageLength: 255, - }, nil) + }, nil, mockStorageDVO) go func() { for { @@ -269,7 +271,7 @@ func TestServerStartError(t *testing.T) { Address: "localhost:99999", APIPrefix: "", MaximumFeedbackMessageLength: 255, - }, nil) + }, nil, nil) err := testServer.Start(nil) assert.EqualError(t, err, "listen tcp: address 99999: invalid port") @@ -328,7 +330,7 @@ func TestRuleFeedbackVote(t *testing.T) { } func(endpoint string) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -447,7 +449,7 @@ func checkBadRuleFeedbackRequest(t *testing.T, message, expectedStatus string) { "status": "` + expectedStatus + `" }` - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -505,7 +507,7 @@ func TestHTTPServer_GetVoteOnRule_BadRuleID(t *testing.T) { } func TestHTTPServer_GetVoteOnRule_DBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -514,7 +516,7 @@ func TestHTTPServer_GetVoteOnRule_DBError(t *testing.T) { ) helpers.FailOnError(t, err) - connection := mockStorage.(*storage.DBStorage).GetConnection() + connection := mockStorage.(*storage.OCPRecommendationsDBStorage).GetConnection() _, err = connection.Exec(`DROP TABLE cluster_rule_user_feedback;`) helpers.FailOnError(t, err) @@ -530,7 +532,7 @@ func TestHTTPServer_GetVoteOnRule_DBError(t *testing.T) { } func TestRuleFeedbackErrorClosedStorage(t 
*testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -561,7 +563,7 @@ func TestHTTPServer_GetVoteOnRule(t *testing.T) { } func(endpoint string) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -613,7 +615,7 @@ func TestRuleToggle(t *testing.T) { } func(endpoint string) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -675,7 +677,7 @@ func TestHTTPServer_deleteOrganizations_NonIntOrgID(t *testing.T) { } func TestHTTPServer_deleteOrganizations_DBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -700,7 +702,7 @@ func TestHTTPServer_deleteClusters(t *testing.T) { } func TestHTTPServer_deleteClusters_DBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -725,7 +727,7 @@ func TestHTTPServer_deleteClusters_BadClusterName(t *testing.T) { } func TestHTTPServer_SaveDisableFeedback(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -773,7 +775,7 @@ func TestHTTPServer_SaveDisableFeedback_Error_BadClusterName(t *testing.T) { } func TestHTTPServer_SaveDisableFeedback_Error_CheckUserClusterPermissions(t *testing.T) { - mockStorage, closer := 
helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -810,7 +812,7 @@ func TestHTTPServer_SaveDisableFeedback_Error_CheckUserClusterPermissions(t *tes } func TestHTTPServer_SaveDisableFeedback_Error_BadBody(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -837,7 +839,7 @@ func TestHTTPServer_SaveDisableFeedback_Error_BadBody(t *testing.T) { } func TestHTTPServer_SaveDisableFeedback_Error_DBError(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) err := mockStorage.WriteReportForCluster( testdata.OrgID, @@ -865,7 +867,7 @@ func TestHTTPServer_SaveDisableFeedback_Error_DBError(t *testing.T) { } func TestHTTPServer_ListDisabledRules(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -879,7 +881,7 @@ func TestHTTPServer_ListDisabledRules(t *testing.T) { } func TestHTTPServer_ListOfReasons(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -893,7 +895,7 @@ func TestHTTPServer_ListOfReasons(t *testing.T) { } func TestHTTPServer_ListDisabledRulesForClusters(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := []types.ClusterName{testdata.ClusterName} @@ -911,7 +913,7 @@ func TestHTTPServer_ListDisabledRulesForClusters(t *testing.T) { } func 
TestHTTPServer_EnableRuleSystemWide(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -929,7 +931,7 @@ func TestHTTPServer_EnableRuleSystemWide(t *testing.T) { } func TestHTTPServer_EnableRuleSystemWideWrongOrgID(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -947,7 +949,7 @@ func TestHTTPServer_EnableRuleSystemWideWrongOrgID(t *testing.T) { } func TestHTTPServer_DisableRuleSystemWide(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -966,7 +968,7 @@ func TestHTTPServer_DisableRuleSystemWide(t *testing.T) { } func TestHTTPServer_DisableRuleSystemWideWrongOrgID(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -984,7 +986,7 @@ func TestHTTPServer_DisableRuleSystemWideWrongOrgID(t *testing.T) { } func TestHTTPServer_DisableRuleSystemWideNoJustification(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1002,7 +1004,7 @@ func TestHTTPServer_DisableRuleSystemWideNoJustification(t *testing.T) { } func TestHTTPServer_UpdateRuleSystemWide(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() 
helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1021,7 +1023,7 @@ func TestHTTPServer_UpdateRuleSystemWide(t *testing.T) { } func TestHTTPServer_UpdateRuleSystemWideWrongOrgID(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1039,7 +1041,7 @@ func TestHTTPServer_UpdateRuleSystemWideWrongOrgID(t *testing.T) { } func TestHTTPServer_UpdateRuleSystemWideNoJustification(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1057,7 +1059,7 @@ func TestHTTPServer_UpdateRuleSystemWideNoJustification(t *testing.T) { } func TestHTTPServer_ReadRuleSystemWideNoRule(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1075,7 +1077,7 @@ func TestHTTPServer_ReadRuleSystemWideNoRule(t *testing.T) { } func TestHTTPServer_ReadRuleSystemWideExistingRule(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() // disable rule first @@ -1108,7 +1110,7 @@ func TestHTTPServer_ReadRuleSystemWideExistingRule(t *testing.T) { } func TestHTTPServer_ReadRuleSystemWideWrongOrgID(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1126,7 +1128,7 @@ func TestHTTPServer_ReadRuleSystemWideWrongOrgID(t *testing.T) { } func TestHTTPServer_ListOfDisabledRulesSystemWide(t *testing.T) { - 
mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1140,7 +1142,7 @@ func TestHTTPServer_ListOfDisabledRulesSystemWide(t *testing.T) { } func TestHTTPServer_RecommendationsListEndpoint_NoRecommendations(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1163,7 +1165,7 @@ func TestHTTPServer_RecommendationsListEndpoint_NoRecommendations(t *testing.T) } func TestHTTPServer_RecommendationsListEndpoint_DifferentClusters(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1186,7 +1188,7 @@ func TestHTTPServer_RecommendationsListEndpoint_DifferentClusters(t *testing.T) } func TestHTTPServer_RecommendationsListEndpoint_3Recs1Cluster(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1216,7 +1218,7 @@ func TestHTTPServer_RecommendationsListEndpoint_3Recs1Cluster(t *testing.T) { } func TestHTTPServer_RecommendationsListEndpoint_3Recs2Clusters(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := make([]types.ClusterName, 2) @@ -1282,7 +1284,7 @@ func TestRuleClusterDetailEndpoint_NoRowsFoundForGivenOrgDBError(t *testing.T) { func TestRuleClusterDetailEndpoint_NoRowsFoundForGivenSelectorDBError(t *testing.T) { const errStr = "Item with ID test.rule3|ek3 was not found in the storage" - mockStorage, closer := 
helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() _ = mockStorage.WriteRecommendationsForCluster(testdata.OrgID, testdata.ClusterName, testdata.Report2Rules, time.RFC3339) @@ -1300,7 +1302,7 @@ func TestRuleClusterDetailEndpoint_NoRowsFoundForGivenSelectorDBError(t *testing func TestRuleClusterDetailEndpoint_BadBodyInRequest(t *testing.T) { errStr := "Internal Server Error" - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() _ = mockStorage.WriteRecommendationsForCluster(testdata.OrgID, testdata.ClusterName, testdata.Report2Rules, time.RFC3339) @@ -1357,7 +1359,7 @@ func TestRuleClusterDetailEndpoint_OtherDBErrors(t *testing.T) { } func TestRuleClusterDetailEndpoint_ValidParameters(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() respBody := `{"clusters":[{"cluster":"%v", "cluster_name":"", "impacted":"%v", "meta":{"cluster_version":"%v"},"last_checked_at":"%v"}],"status":"ok"}` @@ -1423,7 +1425,7 @@ func TestRuleClusterDetailEndpoint_ValidParameters(t *testing.T) { } func TestRuleClusterDetailEndpoint_ValidParametersActiveClusters(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() respBody := `{"clusters":[{"cluster":"%v", "cluster_name":"", "impacted":"%v", "meta":{"cluster_version":"%v"}, "last_checked_at":"%v"}],"status":"ok"}` @@ -1465,7 +1467,7 @@ func TestRuleClusterDetailEndpoint_ValidParametersActiveClusters(t *testing.T) { } func TestRuleClusterDetailEndpoint_InvalidParametersActiveClusters(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() errStr := `Error during parsing param 'org_id' 
with value 'x'. Error: 'unsigned integer expected'` @@ -1500,7 +1502,7 @@ func TestRuleClusterDetailEndpoint_InvalidParametersActiveClusters(t *testing.T) // TestServeInfoMap checks the REST API server behaviour for info endpoint func TestServeInfoMap(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1512,7 +1514,7 @@ func TestServeInfoMap(t *testing.T) { } func TestHTTPServer_ClustersRecommendationsListEndpoint_NoRecommendations(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1535,7 +1537,7 @@ func TestHTTPServer_ClustersRecommendationsListEndpoint_NoRecommendations(t *tes } func TestHTTPServer_ClustersRecommendationsListEndpoint_2Recs1Cluster(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() respBody := `{"recommendations":{"%v":["%v"],"%v":["%v"]},"status":"ok"}` @@ -1565,7 +1567,7 @@ func TestHTTPServer_ClustersRecommendationsListEndpoint_2Recs1Cluster(t *testing } func TestHTTPServer_ClustersRecommendationsListEndpoint_BadOrgIDBadRequest(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := []types.ClusterName{testdata.GetRandomClusterID()} @@ -1582,7 +1584,7 @@ func TestHTTPServer_ClustersRecommendationsListEndpoint_BadOrgIDBadRequest(t *te } func TestHTTPServer_ClustersRecommendationsListEndpoint_MissingClusterListBadRequest(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() var clusterListBadType 
string @@ -1599,7 +1601,7 @@ func TestHTTPServer_ClustersRecommendationsListEndpoint_MissingClusterListBadReq } func TestHTTPServer_ListOfDisabledClusters_NoneDisabled(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() helpers.AssertAPIRequest(t, mockStorage, nil, &helpers.APIRequest{ @@ -1621,7 +1623,7 @@ func TestHTTPServer_ListOfDisabledClusters_NoneDisabled(t *testing.T) { } func TestHTTPServer_ListOfDisabledClusters_OneDisabled(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 2) @@ -1660,7 +1662,7 @@ func TestHTTPServer_ListOfDisabledClusters_OneDisabled(t *testing.T) { } func TestHTTPServer_ListOfDisabledClusters_JustificationTests(t *testing.T) { - mockStorage, closer := helpers.MustGetMockStorage(t, true) + mockStorage, closer := helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 2) diff --git a/server/vote_endpoints_benchmarks_test.go b/server/vote_endpoints_benchmarks_test.go index d3ff7db4d..4347976ff 100644 --- a/server/vote_endpoints_benchmarks_test.go +++ b/server/vote_endpoints_benchmarks_test.go @@ -35,20 +35,6 @@ import ( "github.com/RedHatInsights/insights-results-aggregator/types" ) -func BenchmarkHTTPServer_VoteEndpoints_WithSQLiteMemoryStorage(b *testing.B) { - mockStorage, closer := helpers.MustGetMockStorage(b, true) - defer closer() - - benchmarkHTTPServerVoteEndpointsWithStorage(b, mockStorage) -} - -func BenchmarkHTTPServer_VoteEndpoints_WithSQLiteFileStorage(b *testing.B) { - mockStorage, cleaner := helpers.MustGetSQLiteFileStorage(b, true) - defer cleaner() - - benchmarkHTTPServerVoteEndpointsWithStorage(b, mockStorage) -} - func BenchmarkHTTPServer_VoteEndpoints_WithPostgresStorage(b *testing.B) { mockStorage, cleaner := 
helpers.MustGetPostgresStorage(b, true) defer cleaner() @@ -56,7 +42,7 @@ func BenchmarkHTTPServer_VoteEndpoints_WithPostgresStorage(b *testing.B) { benchmarkHTTPServerVoteEndpointsWithStorage(b, mockStorage) } -func benchmarkHTTPServerVoteEndpointsWithStorage(b *testing.B, mockStorage storage.Storage) { +func benchmarkHTTPServerVoteEndpointsWithStorage(b *testing.B, mockStorage storage.OCPRecommendationsStorage) { zerolog.SetGlobalLevel(zerolog.WarnLevel) // each test case will choose random endpoint args from this pool @@ -65,7 +51,7 @@ func benchmarkHTTPServerVoteEndpointsWithStorage(b *testing.B, mockStorage stora endpointArgs := prepareVoteEndpointArgs(b, numberOfEndpointArgs, mockStorage) defer cleanupEndpointArgs(b, endpointArgs, mockStorage) - testServer := server.New(helpers.DefaultServerConfig, mockStorage) + testServer := server.New(helpers.DefaultServerConfig, mockStorage, nil) type TestCase struct { TestName string @@ -134,7 +120,7 @@ type voteEndpointArg struct { ErrorKey types.ErrorKey } -func prepareVoteEndpointArgs(tb testing.TB, numberOfEndpointArgs uint, mockStorage storage.Storage) []voteEndpointArg { +func prepareVoteEndpointArgs(tb testing.TB, numberOfEndpointArgs uint, mockStorage storage.OCPRecommendationsStorage) []voteEndpointArg { var endpointArgs []voteEndpointArg for i := uint(0); i < numberOfEndpointArgs; i++ { @@ -168,7 +154,7 @@ func prepareVoteEndpointArgs(tb testing.TB, numberOfEndpointArgs uint, mockStora return endpointArgs } -func cleanupEndpointArgs(tb testing.TB, args []voteEndpointArg, mockStorage storage.Storage) { +func cleanupEndpointArgs(tb testing.TB, args []voteEndpointArg, mockStorage storage.OCPRecommendationsStorage) { for _, arg := range args { err := mockStorage.DeleteReportsForCluster(arg.ClusterID) helpers.FailOnError(tb, err) diff --git a/shellcheck.sh b/shellcheck.sh index 51585f0b4..a669eceb8 100755 --- a/shellcheck.sh +++ b/shellcheck.sh @@ -14,10 +14,15 @@ # limitations under the License. if ! 
command -v shellcheck > /dev/null 2>&1; then + echo "Installing shellcheck" scversion="stable" # or "v0.4.7", or "latest" - wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" | tar -xJv - shellcheck-stable/shellcheck --version - shellcheck-stable/shellcheck --exclude=SC1090,SC2086,SC2034,SC1091 -- *.sh */*.sh -else - shellcheck --exclude=SC1090,SC2086,SC2034,SC1091 -- *.sh */*.sh + unameOut="$(uname -s)" + case "${unameOut}" in + Linux*) wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" | tar -xJv ;; + Darwin*) brew install shellcheck ;; + *) machine="UNKNOWN:${unameOut}" + esac + fi +shellcheck --version +shellcheck --exclude=SC1090,SC2086,SC2034,SC1091 -- *.sh */*.sh diff --git a/storage/configuration.go b/storage/configuration.go index bb39c6574..96d1c34df 100644 --- a/storage/configuration.go +++ b/storage/configuration.go @@ -19,7 +19,6 @@ package storage // Configuration represents configuration of data storage type Configuration struct { Driver string `mapstructure:"db_driver" toml:"db_driver"` - SQLiteDataSource string `mapstructure:"sqlite_datasource" toml:"sqlite_datasource"` LogSQLQueries bool `mapstructure:"log_sql_queries" toml:"log_sql_queries"` PGUsername string `mapstructure:"pg_username" toml:"pg_username"` PGPassword string `mapstructure:"pg_password" toml:"pg_password"` diff --git a/storage/debug.go b/storage/debug.go index c9fd5e3a6..7e4e30b3e 100644 --- a/storage/debug.go +++ b/storage/debug.go @@ -21,7 +21,7 @@ import ( ) // PrintRuleDisableDebugInfo is a temporary helper function used to print form cluster rule toggle related tables -func (storage DBStorage) PrintRuleDisableDebugInfo() { +func (storage OCPRecommendationsDBStorage) PrintRuleDisableDebugInfo() { err := storage.PrintRuleToggles() if err != nil { log.Error().Err(err).Msg("unable to print records from cluster_rule_toggle") @@ 
-35,7 +35,7 @@ func (storage DBStorage) PrintRuleDisableDebugInfo() { // PrintRuleToggles prints enable/disable counts for all rules // TEMPORARY because we currently don't have access to stage database when testing migrations. -func (storage DBStorage) PrintRuleToggles() error { +func (storage OCPRecommendationsDBStorage) PrintRuleToggles() error { log.Info().Msg("PrintRuleToggles start") query := ` @@ -75,7 +75,7 @@ func (storage DBStorage) PrintRuleToggles() error { // PrintRuleDisableFeedbacks prints enable/disable feedback counts for all rules // TEMPORARY because we currently don't have access to stage database when testing migrations. -func (storage DBStorage) PrintRuleDisableFeedbacks() error { +func (storage OCPRecommendationsDBStorage) PrintRuleDisableFeedbacks() error { log.Info().Msg("PrintRuleDisableFeedbacks start") query := ` diff --git a/storage/dvo_recommendations_storage.go b/storage/dvo_recommendations_storage.go new file mode 100644 index 000000000..7f36bc200 --- /dev/null +++ b/storage/dvo_recommendations_storage.go @@ -0,0 +1,563 @@ +/* +Copyright © 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +*/ + +package storage + +import ( + "database/sql" + "encoding/json" + "fmt" + "time" + + "github.com/rs/zerolog/log" + + "github.com/RedHatInsights/insights-results-aggregator/metrics" + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/migration/dvomigrations" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +// DVORecommendationsStorage represents an interface to almost any database or storage system +type DVORecommendationsStorage interface { + Init() error + Close() error + GetMigrations() []migration.Migration + GetDBDriverType() types.DBDriver + GetDBSchema() migration.Schema + GetConnection() *sql.DB + GetMaxVersion() migration.Version + MigrateToLatest() error + ReportsCount() (int, error) + WriteReportForCluster( + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + workloads []types.WorkloadRecommendation, + lastCheckedTime time.Time, + gatheredAtTime time.Time, + storedAtTime time.Time, + requestID types.RequestID, + ) error + ReadWorkloadsForOrganization(types.OrgID) ([]types.DVOReport, error) + ReadWorkloadsForClusterAndNamespace( + types.OrgID, + types.ClusterName, + string, + ) (types.DVOReport, error) + DeleteReportsForOrg(orgID types.OrgID) error +} + +const ( + // dvoDBSchema represents the name of the DB schema used by DVO-related queries/migrations + dvoDBSchema = "dvo" + // orgIDStr used in log messages + orgIDStr = "orgID" +) + +// DVORecommendationsDBStorage is an implementation of Storage interface that use selected SQL like database +// like PostgreSQL or RDS etc. That implementation is based on the standard +// sql package. It is possible to configure connection via Configuration structure. 
+// SQLQueriesLog is log for sql queries, default is nil which means nothing is logged +type DVORecommendationsDBStorage struct { + connection *sql.DB + dbDriverType types.DBDriver + // clusterLastCheckedDict is a dictionary of timestamps when the clusters were last checked. + clustersLastChecked map[types.ClusterName]time.Time +} + +// NewDVORecommendationsStorage function creates and initializes a new instance of Storage interface +func NewDVORecommendationsStorage(configuration Configuration) (DVORecommendationsStorage, error) { + switch configuration.Type { + case types.SQLStorage: + log.Info().Str("DVO storage type", configuration.Type).Send() + return newDVOStorage(configuration) + case types.NoopStorage: + return newNoopDVOStorage(configuration) + default: + // error to be thrown + err := fmt.Errorf("Unknown storage type '%s'", configuration.Type) + log.Error().Err(err).Msg("Init failure") + return nil, err + } +} + +// newNoopDVOStorage function creates and initializes a new instance of Noop storage +func newNoopDVOStorage(_ Configuration) (DVORecommendationsStorage, error) { + return &NoopDVOStorage{}, nil +} + +// newDVOStorage function creates and initializes a new instance of DB storage +func newDVOStorage(configuration Configuration) (DVORecommendationsStorage, error) { + driverType, driverName, dataSource, err := initAndGetDriver(configuration) + if err != nil { + return nil, err + } + + log.Info().Msgf( + "Making connection to DVO data storage, driver=%s, connection string 'postgresql://%v:@%v:%v/%v?%v'", + driverName, + configuration.PGUsername, + configuration.PGHost, + configuration.PGPort, + configuration.PGDBName, + configuration.PGParams, + ) + + connection, err := sql.Open(driverName, dataSource) + if err != nil { + log.Error().Err(err).Msg("Can not connect to data storage") + return nil, err + } + + log.Debug().Msg("connection to DVO storage created") + + return NewDVORecommendationsFromConnection(connection, driverType), nil +} + +// 
NewDVORecommendationsFromConnection function creates and initializes a new instance of Storage interface from prepared connection +func NewDVORecommendationsFromConnection(connection *sql.DB, dbDriverType types.DBDriver) *DVORecommendationsDBStorage { + return &DVORecommendationsDBStorage{ + connection: connection, + dbDriverType: dbDriverType, + clustersLastChecked: map[types.ClusterName]time.Time{}, + } +} + +// Init performs all database initialization +// tasks necessary for further service operation. +func (storage DVORecommendationsDBStorage) Init() error { + // Read clusterName:LastChecked dictionary from DB. + rows, err := storage.connection.Query("SELECT cluster_id, last_checked_at FROM dvo.dvo_report;") + if err != nil { + return err + } + + log.Debug().Msg("executing last_checked_at query") + for rows.Next() { + var ( + clusterName types.ClusterName + lastChecked sql.NullTime + ) + + if err := rows.Scan(&clusterName, &lastChecked); err != nil { + if closeErr := rows.Close(); closeErr != nil { + log.Error().Err(closeErr).Msg("Unable to close the DB rows handle") + } + return err + } + + storage.clustersLastChecked[clusterName] = lastChecked.Time + } + + // Not using defer to close the rows here to: + // - make errcheck happy (it doesn't like ignoring returned errors), + // - return a possible error returned by the Close method. + return rows.Close() +} + +// Close method closes the connection to database. Needs to be called at the end of application lifecycle. 
+func (storage DVORecommendationsDBStorage) Close() error { + log.Info().Msg("Closing connection to data storage") + if storage.connection != nil { + err := storage.connection.Close() + if err != nil { + log.Error().Err(err).Msg("Can not close connection to data storage") + return err + } + } + return nil +} + +// GetMigrations returns a list of database migrations related to DVO recommendation tables +func (storage DVORecommendationsDBStorage) GetMigrations() []migration.Migration { + return dvomigrations.UsableDVOMigrations +} + +// GetDBDriverType returns db driver type +func (storage DVORecommendationsDBStorage) GetDBDriverType() types.DBDriver { + return storage.dbDriverType +} + +// GetConnection returns db connection(useful for testing) +func (storage DVORecommendationsDBStorage) GetConnection() *sql.DB { + return storage.connection +} + +// GetDBSchema returns the schema name to be used in queries +func (storage DVORecommendationsDBStorage) GetDBSchema() migration.Schema { + return migration.Schema(dvoDBSchema) +} + +// GetMaxVersion returns the highest available migration version. +// The DB version cannot be set to a value higher than this. +// This value is equivalent to the length of the list of available migrations. +func (storage DVORecommendationsDBStorage) GetMaxVersion() migration.Version { + return migration.Version(len(storage.GetMigrations())) +} + +// MigrateToLatest migrates the database to the latest available +// migration version. This must be done before an Init() call. 
+func (storage DVORecommendationsDBStorage) MigrateToLatest() error { + dbConn, dbSchema := storage.GetConnection(), storage.GetDBSchema() + + if err := migration.InitInfoTable(dbConn, dbSchema); err != nil { + return err + } + + return migration.SetDBVersion( + dbConn, + storage.dbDriverType, + dbSchema, + storage.GetMaxVersion(), + storage.GetMigrations(), + ) +} + +// ReportsCount reads number of all records stored in the dvo.dvo_report table +func (storage DVORecommendationsDBStorage) ReportsCount() (int, error) { + count := -1 + err := storage.connection.QueryRow("SELECT count(*) FROM dvo.dvo_report;").Scan(&count) + err = types.ConvertDBError(err, nil) + + return count, err +} + +// WriteReportForCluster writes result (health status) for selected cluster for given organization +func (storage DVORecommendationsDBStorage) WriteReportForCluster( + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + workloads []types.WorkloadRecommendation, + lastCheckedTime time.Time, + _ time.Time, + _ time.Time, + _ types.RequestID, +) error { + // Skip writing the report if it isn't newer than a report + // that is already in the database for the same cluster. + if oldLastChecked, exists := storage.clustersLastChecked[clusterName]; exists && !lastCheckedTime.After(oldLastChecked) { + return types.ErrOldReport + } + + if storage.dbDriverType != types.DBDriverPostgres { + return fmt.Errorf("writing workloads with DB %v is not supported", storage.dbDriverType) + } + + // Begin a new transaction. + tx, err := storage.connection.Begin() + if err != nil { + return err + } + + err = func(tx *sql.Tx) error { + // Check if there is a more recent report for the cluster already in the database. 
+ rows, err := tx.Query( + "SELECT last_checked_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND last_checked_at > $3;", + orgID, clusterName, lastCheckedTime) + err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) + if err != nil { + log.Error().Err(err).Msg("Unable to look up the most recent report in the database") + return err + } + + defer closeRows(rows) + + // If there is one, print a warning and discard the report (don't update it). + if rows.Next() { + log.Warn().Msgf("Database already contains report for organization %d and cluster name %s more recent than %v", + orgID, clusterName, lastCheckedTime) + return nil + } + + err = storage.updateReport(tx, orgID, clusterName, report, workloads, lastCheckedTime) + if err != nil { + return err + } + + storage.clustersLastChecked[clusterName] = lastCheckedTime + metrics.WrittenReports.Inc() + + return nil + }(tx) + + finishTransaction(tx, err) + + return err +} + +func (storage DVORecommendationsDBStorage) updateReport( + tx *sql.Tx, + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + recommendations []types.WorkloadRecommendation, + lastCheckedTime time.Time, +) error { + // Get reported_at if present before deletion + reportedAtMap, err := storage.getReportedAtMap(orgID, clusterName) + if err != nil { + log.Error().Err(err).Msgf("Unable to get dvo report reported_at") + reportedAtMap = make(map[string]types.Timestamp) // create empty map + } + + // Delete previous reports (CCXDEV-12529) + _, err = tx.Exec("DELETE FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2;", orgID, clusterName) + if err != nil { + log.Err(err).Msgf("Unable to remove previous cluster DVO reports (org: %v, cluster: %v)", orgID, clusterName) + return err + } + + if len(recommendations) == 0 { + log.Info().Msgf("No new DVO report to insert (org: %v, cluster: %v)", + orgID, clusterName, + ) + return nil + } + + namespaceMap, objectsMap, namespaceRecommendationCount := 
mapWorkloadRecommendations(&recommendations) + + // Get the INSERT statement for writing a workload into the database. + workloadInsertStatement := storage.getReportInsertQuery() + + // Get values to be stored in dvo.dvo_report table + values := make([]interface{}, 9) + for namespaceUID, namespaceName := range namespaceMap { + values[0] = orgID // org_id + values[1] = clusterName // cluster_id + values[2] = namespaceUID // namespace_id + values[3] = namespaceName // namespace_name + + workloadAsJSON, err := json.Marshal(report) + if err != nil { + log.Error().Err(err).Msg("cannot store raw workload report") + values[4] = "{}" // report + } else { + values[4] = string(workloadAsJSON) // report + } + + values[5] = namespaceRecommendationCount[namespaceUID] // recommendations + values[6] = objectsMap[namespaceUID] // objects + + if reportedAt, ok := reportedAtMap[namespaceUID]; ok { + values[7] = reportedAt // reported_at + } else { + values[7] = lastCheckedTime + } + + values[8] = lastCheckedTime // last_checked_at + _, err = tx.Exec(workloadInsertStatement, values...) + if err != nil { + log.Err(err).Msgf("Unable to insert the cluster workloads (org: %v, cluster: %v)", + orgID, clusterName, + ) + return err + } + } + + return nil +} + +// mapWorkloadRecommendations filters out the data which is grouped by recommendations and aggregates +// them by namespace. 
+// Essentially we need to "invert" data from: +// - list of recommendations: list of workloads from ALL namespaces combined (objects can also be duplicate between recommendations) +// +// to: +// - list of namespaces: list of affected workloads and data aggregations for this particular namespace +func mapWorkloadRecommendations(recommendations *[]types.WorkloadRecommendation) ( + map[string]string, map[string]int, map[string]int, +) { + // map the namespace ID to the namespace name + namespaceMap := make(map[string]string) + // map how many recommendations hit per namespace + namespaceRecommendationCount := make(map[string]int) + // map the number of unique workloads affected by at least 1 rule per namespace + objectsPerNamespace := make(map[string]map[string]struct{}) + + for _, recommendation := range *recommendations { + // objectsMapPerRecommendation is used to calculate number of rule hits in namespace + objectsPerRecommendation := make(map[string]int) + + for i := range recommendation.Workloads { + workload := &recommendation.Workloads[i] + + if _, ok := namespaceMap[workload.NamespaceUID]; !ok { + // store the namespace name in the namespaceMap if it's not already there + namespaceMap[workload.NamespaceUID] = workload.Namespace + } + + // per single recommendation within namespace + objectsPerRecommendation[workload.NamespaceUID]++ + + // per whole namespace; just workload IDs with empty structs to filter out duplicate objects + if _, ok := objectsPerNamespace[workload.NamespaceUID]; !ok { + objectsPerNamespace[workload.NamespaceUID] = make(map[string]struct{}) + } + objectsPerNamespace[workload.NamespaceUID][workload.UID] = struct{}{} + } + + // increase rule hit count for affected namespaces + for namespace := range namespaceMap { + if _, ok := objectsPerRecommendation[namespace]; ok { + namespaceRecommendationCount[namespace]++ + } + } + } + + uniqueObjectsMap := make(map[string]int) + // count the number of unique objects per namespace + for namespace, 
objects := range objectsPerNamespace { + uniqueObjectsMap[namespace] = len(objects) + } + + return namespaceMap, uniqueObjectsMap, namespaceRecommendationCount +} + +// getReportedAtMap returns a map between +// namespace_id -> reported_at +// for each dvo.dvo_report row matching given +// orgID and clusterName +func (storage DVORecommendationsDBStorage) getReportedAtMap(orgID types.OrgID, clusterName types.ClusterName) (map[string]types.Timestamp, error) { + query := "SELECT namespace_id, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2;" + reportedAtRows, err := storage.connection.Query( + query, orgID, clusterName) + if err != nil { + log.Error().Err(err).Msg("error retrieving dvo.dvo_report created_at timestamp") + return nil, err + } + defer closeRows(reportedAtRows) + + reportedAtMap := make(map[string]types.Timestamp) + for reportedAtRows.Next() { + var namespaceID string + var reportedAt time.Time + err := reportedAtRows.Scan( + &namespaceID, + &reportedAt, + ) + if err != nil { + log.Error().Err(err).Msg("error scanning for rule id -> created_at map") + continue + } + reportedAtMap[namespaceID] = types.Timestamp(reportedAt.UTC().Format(time.RFC3339)) + } + return reportedAtMap, err +} + +// ReadWorkloadsForOrganization returns all rows from dvo.dvo_report table for given organization +func (storage DVORecommendationsDBStorage) ReadWorkloadsForOrganization(orgID types.OrgID) ( + workloads []types.DVOReport, + err error, +) { + tStart := time.Now() + query := ` + SELECT cluster_id, namespace_id, namespace_name, recommendations, objects, reported_at, last_checked_at + FROM dvo.dvo_report + WHERE org_id = $1 + ` + + // #nosec G202 + rows, err := storage.connection.Query(query, orgID) + + err = types.ConvertDBError(err, orgID) + if err != nil { + return workloads, err + } + + defer closeRows(rows) + + var ( + dvoReport types.DVOReport + lastCheckedAtDB sql.NullTime + reportedAtDB sql.NullTime + count uint + ) + for rows.Next() { 
err = rows.Scan( + &dvoReport.ClusterID, + &dvoReport.NamespaceID, + &dvoReport.NamespaceName, + &dvoReport.Recommendations, + &dvoReport.Objects, + &reportedAtDB, + &lastCheckedAtDB, + ) + if err != nil { + log.Error().Err(err).Msg("ReadWorkloadsForOrganization") + } + + // convert timestamps to string + dvoReport.LastCheckedAt = types.Timestamp(lastCheckedAtDB.Time.UTC().Format(time.RFC3339)) + dvoReport.ReportedAt = types.Timestamp(reportedAtDB.Time.UTC().Format(time.RFC3339)) + + workloads = append(workloads, dvoReport) + count++ + } + + log.Debug().Int(orgIDStr, int(orgID)).Msgf("ReadWorkloadsForOrganization processed %d rows in %v", count, time.Since(tStart)) + + return workloads, err +} + +// ReadWorkloadsForClusterAndNamespace returns a single result from the dvo.dvo_report table +func (storage DVORecommendationsDBStorage) ReadWorkloadsForClusterAndNamespace( + orgID types.OrgID, + clusterID types.ClusterName, + namespaceID string, +) ( + workload types.DVOReport, + err error, +) { + tStart := time.Now() + query := ` + SELECT cluster_id, namespace_id, namespace_name, recommendations, report, objects, reported_at, last_checked_at + FROM dvo.dvo_report + WHERE org_id = $1 + AND cluster_id = $2 + AND namespace_id = $3 + ` + + var ( + dvoReport types.DVOReport + lastCheckedAtDB sql.NullTime + reportedAtDB sql.NullTime + ) + err = storage.connection.QueryRow(query, orgID, clusterID, namespaceID).Scan( + &dvoReport.ClusterID, + &dvoReport.NamespaceID, + &dvoReport.NamespaceName, + &dvoReport.Recommendations, + &dvoReport.Report, + &dvoReport.Objects, + &reportedAtDB, + &lastCheckedAtDB, + ) + if err == sql.ErrNoRows { + return workload, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", orgID, clusterID, namespaceID)} + } + // convert timestamps to string + dvoReport.LastCheckedAt = types.Timestamp(lastCheckedAtDB.Time.UTC().Format(time.RFC3339)) + dvoReport.ReportedAt = types.Timestamp(reportedAtDB.Time.UTC().Format(time.RFC3339)) + + 
log.Debug().Int(orgIDStr, int(orgID)).Msgf("ReadWorkloadsForClusterAndNamespace took %v", time.Since(tStart)) + + return dvoReport, err +} + +// DeleteReportsForOrg deletes all reports related to the specified organization from the storage. +func (storage DVORecommendationsDBStorage) DeleteReportsForOrg(orgID types.OrgID) error { + _, err := storage.connection.Exec("DELETE FROM dvo.dvo_report WHERE org_id = $1;", orgID) + return err +} diff --git a/storage/dvo_recommendations_storage_test.go b/storage/dvo_recommendations_storage_test.go new file mode 100644 index 000000000..3b0cf0290 --- /dev/null +++ b/storage/dvo_recommendations_storage_test.go @@ -0,0 +1,780 @@ +/* +Copyright © 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage_test + +import ( + "encoding/json" + "fmt" + "strconv" + "testing" + "time" + + "database/sql" + "database/sql/driver" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + + "github.com/RedHatInsights/insights-operator-utils/tests/helpers" + "github.com/RedHatInsights/insights-results-aggregator-data/testdata" + "github.com/RedHatInsights/insights-results-aggregator/storage" + ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +var ( + now = time.Now().UTC() + nowAfterOneHour = now.Add(1 * time.Hour).UTC() + dummyTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + + namespaceAUID = "NAMESPACE-UID-A" + namespaceBUID = "NAMESPACE-UID-B" + + namespaceAWorkload = types.DVOWorkload{ + Namespace: "namespace-name-A", + NamespaceUID: namespaceAUID, + Kind: "DaemonSet", + Name: "test-name-0099", + UID: "UID-0099", + } + namespaceAWorkload2 = types.DVOWorkload{ + Namespace: "namespace-name-A", + NamespaceUID: namespaceAUID, + Kind: "Pod", + Name: "test-name-0001", + UID: "UID-0001", + } + namespaceBWorkload = types.DVOWorkload{ + Namespace: "namespace-name-B", + NamespaceUID: namespaceBUID, + Kind: "NotDaemonSet", + Name: "test-name-1199", + UID: "UID-1199", + } + validDVORecommendation = []types.WorkloadRecommendation{ + { + ResponseID: "an_issue|DVO_AN_ISSUE", + Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + Key: "DVO_AN_ISSUE", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + "samples": []interface{}{ + map[string]interface{}{ + "namespace_uid": namespaceAUID, "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158", + }, + }, + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{namespaceAWorkload}, + }, + } + validReport = 
`{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"an_issue|DVO_AN_ISSUE","component":"ccx_rules_ocp.external.dvo.an_issue_pod.recommendation","key":"DVO_AN_ISSUE","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"UID-0099"}]}]}` + validReport2Rules2Namespaces = `{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"unset_requirements|DVO_UNSET_REQUIREMENTS","component":"ccx_rules_ocp.external.dvo.unset_requirements.recommendation","key":"DVO_UNSET_REQUIREMENTS","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"193a2099-1234-5678-916a-d570c9aac158"},{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]},{"response_id":"excluded_pod|EXCLUDED_POD","component":"ccx_rules_ocp.external.dvo.excluded_pod.recommendation","key":"EXCLUDED_POD","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","uid":"12345678-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-B","namespace_u
id":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]}]}` + + twoNamespacesRecommendation = []types.WorkloadRecommendation{ + { + ResponseID: "an_issue|DVO_AN_ISSUE", + Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + Key: "DVO_AN_ISSUE", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + "samples": []interface{}{ + map[string]interface{}{ + "namespace_uid": namespaceAUID, "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158", + }, + }, + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceBWorkload}, + }, + } + + recommendation1TwoNamespaces = types.WorkloadRecommendation{ + ResponseID: "an_issue|DVO_AN_ISSUE", + Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + Key: "DVO_AN_ISSUE", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceBWorkload}, + } + + recommendation2OneNamespace = types.WorkloadRecommendation{ + ResponseID: "unset_requirements|DVO_UNSET_REQUIREMENTS", + Component: "ccx_rules_ocp.external.dvo.unset_requirements.recommendation", + Key: "DVO_UNSET_REQUIREMENTS", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceAWorkload2}, + } + + recommendation3OneNamespace = types.WorkloadRecommendation{ + ResponseID: "bad_requirements|BAD_REQUIREMENTS", + Component: 
"ccx_rules_ocp.external.dvo.bad_requirements.recommendation", + Key: "BAD_REQUIREMENTS", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceAWorkload2}, + } +) + +func init() { + zerolog.SetGlobalLevel(zerolog.DebugLevel) +} + +func TestDVOStorage_Init(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + dbStorage := mockStorage.(*storage.DVORecommendationsDBStorage) + + err := dbStorage.MigrateToLatest() + helpers.FailOnError(t, err) +} + +// TestNewDVOStorageError checks whether constructor for new DVO storage returns error for improper storage configuration +func TestNewDVOStorageError(t *testing.T) { + _, err := storage.NewDVORecommendationsStorage(storage.Configuration{ + Driver: "non existing driver", + Type: "sql", + }) + assert.EqualError(t, err, "driver non existing driver is not supported") +} + +// TestNewDVOStorageNoType checks whether constructor for new DVO storage returns error for improper storage configuration +func TestNewDVOStorageNoType(t *testing.T) { + _, err := storage.NewDVORecommendationsStorage(storage.Configuration{ + Driver: "non existing driver", + }) + assert.EqualError(t, err, "Unknown storage type ''") +} + +// TestNewDVOStorageWrongType checks whether constructor for new DVO storage returns error for improper storage configuration +func TestNewDVOStorageWrongType(t *testing.T) { + _, err := storage.NewDVORecommendationsStorage(storage.Configuration{ + Driver: "non existing driver", + Type: "foobar", + }) + assert.EqualError(t, err, "Unknown storage type 'foobar'") +} + +// TestNewDVOStorageReturnedImplementation check what implementation of storage is returnd +func TestNewDVOStorageReturnedImplementation(t *testing.T) { + s, _ := 
storage.NewDVORecommendationsStorage(storage.Configuration{ + Driver: "postgres", + PGPort: 1234, + PGUsername: "user", + LogSQLQueries: true, + Type: "sql", + }) + assert.IsType(t, &storage.DVORecommendationsDBStorage{}, s) + + s, _ = storage.NewDVORecommendationsStorage(storage.Configuration{ + Driver: "postgres", + PGPort: 1234, + PGUsername: "user", + LogSQLQueries: true, + Type: "noop", + }) + assert.IsType(t, &storage.NoopDVOStorage{}, s) + + s, _ = storage.NewDVORecommendationsStorage(storage.Configuration{ + Driver: "postgres", + PGPort: 1234, + PGUsername: "user", + LogSQLQueries: true, + Type: "redis", + }) + assert.Nil(t, s, "redis type is not supported for DVO storage") +} + +// TestDVOStorageWriteReportForClusterClosedStorage check the behaviour of method WriteReportForCluster +func TestDVOStorageWriteReportForClusterClosedStorage(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + // we need to close storage right now + closer() + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + testdata.ClusterReportEmpty, + validDVORecommendation, + now, + dummyTime, + dummyTime, + testdata.RequestID1, + ) + assert.EqualError(t, err, "sql: database is closed") +} + +// TestDVOStorageWriteReportForClusterUnsupportedDriverError check the behaviour of method WriteReportForCluster +func TestDVOStorageWriteReportForClusterUnsupportedDriverError(t *testing.T) { + fakeStorage := storage.NewDVORecommendationsFromConnection(nil, -1) + // no need to close it + + err := fakeStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + testdata.ClusterReportEmpty, + validDVORecommendation, + now, + dummyTime, + dummyTime, + testdata.RequestID1, + ) + assert.EqualError(t, err, "writing workloads with DB -1 is not supported") +} + +// TestDVOStorageWriteReportForClusterMoreRecentInDB checks that older report +// will not replace a more recent one when writing a report to storage. 
+func TestDVOStorageWriteReportForClusterMoreRecentInDB(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + newerTime := now.UTC() + olderTime := newerTime.Add(-time.Hour) + + // Insert newer report. + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + testdata.ClusterReportEmpty, + validDVORecommendation, + newerTime, + dummyTime, + dummyTime, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + // Try to insert older report. + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + testdata.ClusterReportEmpty, + validDVORecommendation, + olderTime, + now, + now, + testdata.RequestID1, + ) + assert.Equal(t, types.ErrOldReport, err) +} + +// TestDVOStorageWriteReportForClusterDroppedReportTable checks the error +// returned when trying to SELECT from a dropped/missing dvo.dvo_report table. +func TestDVOStorageWriteReportForClusterDroppedReportTable(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + connection := storage.GetConnectionDVO(mockStorage.(*storage.DVORecommendationsDBStorage)) + + query := "DROP TABLE dvo.dvo_report CASCADE;" + + _, err := connection.Exec(query) + helpers.FailOnError(t, err) + + err = mockStorage.WriteReportForCluster( + testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, + validDVORecommendation, now, now, now, + testdata.RequestID1, + ) + assert.EqualError(t, err, "no such table: dvo.dvo_report") +} + +func TestDVOStorageWriteReportForClusterFakePostgresOK(t *testing.T) { + mockStorage, expects := ira_helpers.MustGetMockStorageWithExpectsForDriverDVO(t, types.DBDriverPostgres) + defer ira_helpers.MustCloseMockStorageWithExpectsDVO(t, mockStorage, expects) + + expects.ExpectBegin() + + expects.ExpectQuery(`SELECT last_checked_at FROM dvo.dvo_report`). + WillReturnRows(expects.NewRows([]string{"last_checked_at"})). 
+ RowsWillBeClosed() + + expects.ExpectExec("DELETE FROM dvo.dvo_report"). + WillReturnResult(driver.ResultNoRows) + + expects.ExpectExec("INSERT INTO dvo.dvo_report"). + WillReturnResult(driver.ResultNoRows) + + expects.ExpectCommit() + expects.ExpectClose() + expects.ExpectClose() + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, testdata.ClusterName, `{"test": "report"}`, + validDVORecommendation, testdata.LastCheckedAt, now, now, + testdata.RequestID1) + helpers.FailOnError(t, mockStorage.Close()) + helpers.FailOnError(t, err) +} + +func TestDVOStorageWriteReportForClusterCheckItIsStored(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + err := mockStorage.DeleteReportsForOrg(testdata.OrgID) + helpers.FailOnError(t, err) + + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + validDVORecommendation, + now, + dummyTime, + dummyTime, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + row := mockStorage.GetConnection().QueryRow( + "SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2;", + testdata.OrgID, testdata.ClusterName, + ) + checkStoredReport(t, row, namespaceAWorkload, 1, now, now) +} + +func TestDVOStorageWriteReportForClusterCheckPreviousIsDeleted(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + err := mockStorage.DeleteReportsForOrg(testdata.OrgID) + helpers.FailOnError(t, err) + + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + twoNamespacesRecommendation, + now, + dummyTime, + dummyTime, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + // Check both namespaces are stored in the DB + row := mockStorage.GetConnection().QueryRow(` + SELECT namespace_id, 
namespace_name, report, recommendations, objects, last_checked_at, reported_at + FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, + testdata.OrgID, testdata.ClusterName, namespaceAWorkload.NamespaceUID, + ) + checkStoredReport(t, row, namespaceAWorkload, 1, now, now) + row = mockStorage.GetConnection().QueryRow(` + SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at + FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, + testdata.OrgID, testdata.ClusterName, namespaceBWorkload.NamespaceUID, + ) + checkStoredReport(t, row, namespaceBWorkload, 1, now, now) + + // Now receive a report with just one namespace for the same cluster + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + validDVORecommendation, + nowAfterOneHour, + dummyTime, + dummyTime, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + // Make sure just one namespace is in the DB now + row = mockStorage.GetConnection().QueryRow(` + SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at + FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, + testdata.OrgID, testdata.ClusterName, namespaceAWorkload.NamespaceUID, + ) + checkStoredReport(t, row, namespaceAWorkload, 1, nowAfterOneHour, now) + row = mockStorage.GetConnection().QueryRow(` + SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at + FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, + testdata.OrgID, testdata.ClusterName, namespaceBWorkload.NamespaceUID, + ) + checkRowDoesntExist(t, row) +} + +func checkStoredReport(t *testing.T, row *sql.Row, want types.DVOWorkload, wantObjects int, wantLastChecked, wantReportedAt time.Time) { + var ( + namespaceID string + namespaceName string + report types.ClusterReport + 
recommendations int + objects int + lastChecked time.Time + reportedAt time.Time + ) + + err := row.Scan(&namespaceID, &namespaceName, &report, &recommendations, &objects, &lastChecked, &reportedAt) + helpers.FailOnError(t, err) + + unquotedReport, err := strconv.Unquote(string(report)) + helpers.FailOnError(t, err) + var gotWorkloads types.DVOMetrics + err = json.Unmarshal([]byte(unquotedReport), &gotWorkloads) + helpers.FailOnError(t, err) + + assert.Equal(t, want.NamespaceUID, namespaceID, "the column namespace_id is different than expected") + assert.Equal(t, want.Namespace, namespaceName, "the column namespace_name is different than expected") + assert.Equal(t, validDVORecommendation, gotWorkloads.WorkloadRecommendations, "the column report is different than expected") + assert.Equal(t, 1, recommendations, "the column recommendations is different than expected") + assert.Equal(t, wantObjects, objects, "the column objects is different than expected") + assert.Equal(t, wantLastChecked.Truncate(time.Second), lastChecked.UTC().Truncate(time.Second), "the column reported_at is different than expected") + assert.Equal(t, wantReportedAt.Truncate(time.Second), reportedAt.UTC().Truncate(time.Second), "the column last_checked_at is different than expected") +} + +func checkRowDoesntExist(t *testing.T, row *sql.Row) { + var ( + namespaceID string + namespaceName string + report types.ClusterReport + recommendations int + objects int + lastChecked time.Time + reportedAt time.Time + ) + + err := row.Scan(&namespaceID, &namespaceName, &report, &recommendations, &objects, &lastChecked, &reportedAt) + assert.ErrorIs(t, err, sql.ErrNoRows, "a row was found for this queryß") +} + +// TestDVOStorageReadWorkloadsForOrganization tests timestamps being kept correctly +func TestDVOStorageReadWorkloadsForOrganization(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, 
+ testdata.ClusterName, + types.ClusterReport(validReport), + twoNamespacesRecommendation, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + // write new archive with newer timestamp, old reported_at must be kept + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + twoNamespacesRecommendation, + nowAfterOneHour, + nowAfterOneHour, + nowAfterOneHour, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + workloads, err := mockStorage.ReadWorkloadsForOrganization(testdata.OrgID) + helpers.FailOnError(t, err) + + assert.Equal(t, testdata.ClusterName, types.ClusterName(workloads[0].ClusterID)) + assert.Equal(t, types.Timestamp(nowAfterOneHour.UTC().Format(time.RFC3339)), workloads[0].LastCheckedAt) + assert.Equal(t, types.Timestamp(now.UTC().Format(time.RFC3339)), workloads[0].ReportedAt) +} + +// TestDVOStorageReadWorkloadsForNamespace tests timestamps being kept correctly +func TestDVOStorageReadWorkloadsForNamespace_Timestamps(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + twoNamespacesRecommendation, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + // write new archive with newer timestamp, old reported_at must be kept + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + twoNamespacesRecommendation, + nowAfterOneHour, + nowAfterOneHour, + nowAfterOneHour, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + helpers.FailOnError(t, err) + + assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) + assert.Equal(t, namespaceAUID, 
report.NamespaceID) + assert.Equal(t, uint(1), report.Recommendations) + assert.Equal(t, uint(1), report.Objects) + assert.Equal(t, types.Timestamp(nowAfterOneHour.UTC().Format(time.RFC3339)), report.LastCheckedAt) + assert.Equal(t, types.Timestamp(now.UTC().Format(time.RFC3339)), report.ReportedAt) +} + +// TestDVOStorageReadWorkloadsForNamespace tests the behavior when we insert 1 recommendation and 1 object +// and then 1 recommendation with 2 objects +func TestDVOStorageReadWorkloadsForNamespace_TwoObjectsOneNamespace(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + nowTstmp := types.Timestamp(now.UTC().Format(time.RFC3339)) + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + twoNamespacesRecommendation, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + helpers.FailOnError(t, err) + + assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) + assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, uint(1), report.Recommendations) + assert.Equal(t, uint(1), report.Objects) + assert.Equal(t, nowTstmp, report.ReportedAt) + assert.Equal(t, nowTstmp, report.LastCheckedAt) + + newerReport2Objs := twoNamespacesRecommendation + newerReport2Objs[0].Workloads = []types.DVOWorkload{namespaceAWorkload, namespaceAWorkload2, namespaceBWorkload} + // write new archive with newer timestamp and 1 more object in the recommendation hit + err = mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + newerReport2Objs, + nowAfterOneHour, + nowAfterOneHour, + nowAfterOneHour, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + report, err = mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, 
testdata.ClusterName, namespaceAUID) + helpers.FailOnError(t, err) + + assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) + assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, uint(1), report.Recommendations) + assert.Equal(t, uint(2), report.Objects) // <-- two objs now + + // reported_at keeps timestamp, last_checked_at gets updated + assert.Equal(t, types.Timestamp(nowAfterOneHour.UTC().Format(time.RFC3339)), report.LastCheckedAt) + assert.Equal(t, nowTstmp, report.ReportedAt) +} + +// TestDVOStorageReadWorkloadsForNamespace tests the behavior when we insert 1 recommendation and 1 object +// and then 1 recommendation with 2 objects +func TestDVOStorageWriteReport_TwoNamespacesTwoRecommendations(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + nowTstmp := types.Timestamp(now.UTC().Format(time.RFC3339)) + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport2Rules2Namespaces), + []types.WorkloadRecommendation{recommendation1TwoNamespaces, recommendation2OneNamespace}, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + expectedWorkloads := []types.DVOReport{ + { + NamespaceID: namespaceAUID, + NamespaceName: namespaceAWorkload.Namespace, + ClusterID: string(testdata.ClusterName), + Recommendations: uint(2), + Objects: uint(2), // <-- must be 2, because one workload is hitting more recommendations, but counts as 1 + ReportedAt: nowTstmp, + LastCheckedAt: nowTstmp, + }, + { + NamespaceID: namespaceBUID, + NamespaceName: namespaceBWorkload.Namespace, + ClusterID: string(testdata.ClusterName), + Recommendations: uint(1), // <-- must contain only 1 rule, the other rule wasn't hitting this ns + Objects: uint(1), + ReportedAt: nowTstmp, + LastCheckedAt: nowTstmp, + }, + } + + workloads, err := mockStorage.ReadWorkloadsForOrganization(testdata.OrgID) + helpers.FailOnError(t, 
err) + + assert.Equal(t, 2, len(workloads)) + assert.ElementsMatch(t, expectedWorkloads, workloads) + + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + helpers.FailOnError(t, err) + + assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) + assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, uint(2), report.Recommendations) + assert.Equal(t, uint(2), report.Objects) + assert.Equal(t, nowTstmp, report.ReportedAt) + assert.Equal(t, nowTstmp, report.LastCheckedAt) +} + +func TestDVOStorageWriteReport_FilterOutDuplicateObjects_CCXDEV_12608_Reproducer(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + nowTstmp := types.Timestamp(now.UTC().Format(time.RFC3339)) + + // writing 3 recommendations, but objects/workloads are hitting multiple recommendations + // and need to be filtered out + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport2Rules2Namespaces), + []types.WorkloadRecommendation{ + recommendation1TwoNamespaces, + recommendation2OneNamespace, + recommendation3OneNamespace, + }, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + expectedWorkloads := []types.DVOReport{ + { + NamespaceID: namespaceAUID, + NamespaceName: namespaceAWorkload.Namespace, + ClusterID: string(testdata.ClusterName), + Recommendations: uint(3), + Objects: uint(2), // <-- must be 2, because workloadA and workloadB are hitting more rules, but count as 1 within a namespace + ReportedAt: nowTstmp, + LastCheckedAt: nowTstmp, + }, + { + NamespaceID: namespaceBUID, + NamespaceName: namespaceBWorkload.Namespace, + ClusterID: string(testdata.ClusterName), + Recommendations: uint(1), // <-- must contain only 1 rule, the other rules weren't affecting this namespace + Objects: uint(1), // <-- same as ^ + ReportedAt: nowTstmp, + LastCheckedAt: 
nowTstmp, + }, + } + + workloads, err := mockStorage.ReadWorkloadsForOrganization(testdata.OrgID) + helpers.FailOnError(t, err) + + assert.Equal(t, 2, len(workloads)) + assert.ElementsMatch(t, expectedWorkloads, workloads) + + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + helpers.FailOnError(t, err) + + assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) + assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, uint(3), report.Recommendations) + assert.Equal(t, uint(2), report.Objects) + assert.Equal(t, nowTstmp, report.ReportedAt) + assert.Equal(t, nowTstmp, report.LastCheckedAt) +} + +// TestDVOStorageReadWorkloadsForNamespace_MissingData tests wht happens if the data is missing +func TestDVOStorageReadWorkloadsForNamespace_MissingData(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + // write data for namespaceA and testdata.ClusterName + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(validReport), + validDVORecommendation, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + t.Run("cluster and namespace exist", func(t *testing.T) { + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + helpers.FailOnError(t, err) + assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) + assert.Equal(t, namespaceAUID, report.NamespaceID) + }) + + t.Run("cluster exists and namespace doesn't", func(t *testing.T) { + _, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceBUID) + assert.Equal(t, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", testdata.OrgID, testdata.ClusterName, namespaceBUID)}, err) + }) + + t.Run("namespace exists and cluster doesn't", func(t *testing.T) { + 
nonExistingCluster := types.ClusterName("a6fe3cd2-2c6a-48b8-a58d-b05853d47f4f") + _, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, nonExistingCluster, namespaceAUID) + assert.Equal(t, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", testdata.OrgID, nonExistingCluster, namespaceAUID)}, err) + }) +} diff --git a/storage/export_test.go b/storage/export_test.go index 7bbc58628..6614a29b6 100644 --- a/storage/export_test.go +++ b/storage/export_test.go @@ -47,20 +47,24 @@ var ( GetRuleHitsCSV = getRuleHitsCSV ) -func GetConnection(storage *DBStorage) *sql.DB { +func GetConnection(storage *OCPRecommendationsDBStorage) *sql.DB { return storage.connection } -func GetClustersLastChecked(storage *DBStorage) map[types.ClusterName]time.Time { +func GetConnectionDVO(storage *DVORecommendationsDBStorage) *sql.DB { + return storage.connection +} + +func GetClustersLastChecked(storage *OCPRecommendationsDBStorage) map[types.ClusterName]time.Time { return storage.clustersLastChecked } -func SetClustersLastChecked(storage *DBStorage, cluster types.ClusterName, lastChecked time.Time) { +func SetClustersLastChecked(storage *OCPRecommendationsDBStorage, cluster types.ClusterName, lastChecked time.Time) { storage.clustersLastChecked[cluster] = lastChecked } func InsertRecommendations( - storage *DBStorage, orgID types.OrgID, + storage *OCPRecommendationsDBStorage, orgID types.OrgID, clusterName types.ClusterName, report types.ReportRules, createdAt types.Timestamp, impactedSince map[string]types.Timestamp, diff --git a/storage/generic_insert_benchmark_test.go b/storage/generic_insert_benchmark_test.go index deb233505..090509688 100644 --- a/storage/generic_insert_benchmark_test.go +++ b/storage/generic_insert_benchmark_test.go @@ -29,22 +29,16 @@ const ( rowCount = 1000 insertQuery = "INSERT INTO benchmark_tab (name, value) VALUES ($1, $2);" upsertQuery = "INSERT INTO benchmark_tab (id, name, value) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET 
name=$2, value=$3;" - // SQLite alternative to the upsert query above: - // upsertQuery = "REPLACE INTO benchmark_tab (id, name, value) VALUES ($1, $2, $3);" ) -func mustPrepareBenchmark(b *testing.B) (storage.Storage, *sql.DB, func()) { +func mustPrepareBenchmark(b *testing.B) (storage.OCPRecommendationsStorage, *sql.DB, func()) { // Postgres queries are very verbose at DEBUG log level, so it's better // to silence them this way to make benchmark results easier to find. zerolog.SetGlobalLevel(zerolog.WarnLevel) mockStorage, closer := ira_helpers.MustGetPostgresStorage(b, false) - // Alternative using the file-based SQLite DB storage: - // mockStorage, _ := helpers.MustGetSQLiteFileStorage(b) - // Old version using the in-memory SQLite DB storage: - // mockStorage := helpers.MustGetMockStorage(b, false) - conn := storage.GetConnection(mockStorage.(*storage.DBStorage)) + conn := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) _, err := conn.Exec("DROP TABLE IF EXISTS benchmark_tab;") helpers.FailOnError(b, err) diff --git a/storage/info_rule_test.go b/storage/info_rule_test.go index de29785c8..af8eaaf86 100644 --- a/storage/info_rule_test.go +++ b/storage/info_rule_test.go @@ -27,7 +27,7 @@ import ( ) func TestWriteReportInfoForCluster(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() expectations := []struct { @@ -89,7 +89,7 @@ func TestWriteReportInfoForCluster(t *testing.T) { } func TestReadClusterVersionsForClusterList(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := make([]string, 4) @@ -161,7 +161,7 @@ func TestReadClusterVersionsForClusterList(t *testing.T) { } func TestReadClusterVersionsForClusterListEmpty(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + 
mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() versionMap, err := mockStorage.ReadClusterVersionsForClusterList( @@ -172,7 +172,7 @@ func TestReadClusterVersionsForClusterListEmpty(t *testing.T) { } func TestReadClusterVersionsForClusterListDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() _, err := mockStorage.ReadClusterVersionsForClusterList( @@ -183,7 +183,7 @@ func TestReadClusterVersionsForClusterListDBError(t *testing.T) { // TestDBStorageReadClusterListRecommendationsNoRecommendations checks that when no recommendations // are stored, it is an OK state func TestDBStorageReadClusterListRecommendationsNoRecommendationsWithVersion(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -214,7 +214,7 @@ func TestDBStorageReadClusterListRecommendationsNoRecommendationsWithVersion(t * // TestDBStorageReadClusterListRecommendationsWithVersion checks that a cluster with cluster_version // is OK func TestDBStorageReadClusterListRecommendationsWithVersion(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( diff --git a/storage/info_rules.go b/storage/info_rules.go index 7a69d4e7b..8fbbeacff 100644 --- a/storage/info_rules.go +++ b/storage/info_rules.go @@ -35,7 +35,7 @@ const ( ) // WriteReportInfoForCluster writes the relevant report info for selected cluster for hiven organization -func (storage DBStorage) WriteReportInfoForCluster( +func (storage OCPRecommendationsDBStorage) WriteReportInfoForCluster( orgID types.OrgID, clusterName types.ClusterName, info []types.InfoItem, @@ -44,7 +44,7 @@ func (storage 
DBStorage) WriteReportInfoForCluster( // Not checking if there is a previous report because this method will // only be called after successfully writing the main report. If that // fails, this method won't be called - if storage.dbDriverType != types.DBDriverSQLite3 && storage.dbDriverType != types.DBDriverPostgres { + if storage.dbDriverType != types.DBDriverPostgres { return fmt.Errorf("writing report with DB %v is not supported", storage.dbDriverType) } @@ -60,7 +60,7 @@ func (storage DBStorage) WriteReportInfoForCluster( return err } -func (storage DBStorage) updateInfoReport( +func (storage OCPRecommendationsDBStorage) updateInfoReport( tx *sql.Tx, orgID types.OrgID, clusterName types.ClusterName, @@ -83,7 +83,7 @@ func (storage DBStorage) updateInfoReport( } // ReadReportInfoForCluster retrieve the Version for a given cluster and org id -func (storage *DBStorage) ReadReportInfoForCluster( +func (storage *OCPRecommendationsDBStorage) ReadReportInfoForCluster( orgID types.OrgID, clusterName types.ClusterName, ) (types.Version, error) { @@ -91,13 +91,13 @@ func (storage *DBStorage) ReadReportInfoForCluster( err := storage.connection.QueryRow( ` -SELECT - COALESCE ( - ( - SELECT version_info - FROM report_info - WHERE org_id = $1 AND cluster_id = $2 - ), '') +SELECT + COALESCE ( + ( + SELECT version_info + FROM report_info + WHERE org_id = $1 AND cluster_id = $2 + ), '') AS version_info; `, orgID, clusterName, @@ -108,7 +108,7 @@ SELECT } // ReadClusterVersionsForClusterList retrieve the cluster version for a given cluster list and org id -func (storage *DBStorage) ReadClusterVersionsForClusterList( +func (storage *OCPRecommendationsDBStorage) ReadClusterVersionsForClusterList( orgID types.OrgID, clusterList []string, ) (map[types.ClusterName]types.Version, error) { @@ -120,7 +120,7 @@ func (storage *DBStorage) ReadClusterVersionsForClusterList( query := ` SELECT cluster_id, COALESCE(version_info, '') as version_info - FROM report_info + FROM report_info WHERE 
org_id = $1 AND cluster_id IN (%v) ` @@ -152,7 +152,7 @@ func (storage *DBStorage) ReadClusterVersionsForClusterList( return clusterMap, err } -func (storage DBStorage) fillInMetadata(orgID types.OrgID, clusterMap ctypes.ClusterRecommendationMap) { +func (storage OCPRecommendationsDBStorage) fillInMetadata(orgID types.OrgID, clusterMap ctypes.ClusterRecommendationMap) { clusterList := make([]string, len(clusterMap)) var i int diff --git a/storage/noop_dvo_recommendations_storage.go b/storage/noop_dvo_recommendations_storage.go new file mode 100644 index 000000000..c3fc4a841 --- /dev/null +++ b/storage/noop_dvo_recommendations_storage.go @@ -0,0 +1,98 @@ +// Copyright 2023 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "database/sql" + "time" + + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +// NoopDVOStorage represents a storage which does nothing (for benchmarking without a storage) +type NoopDVOStorage struct{} + +// Init noop +func (*NoopDVOStorage) Init() error { + return nil +} + +// Close noop +func (*NoopDVOStorage) Close() error { + return nil +} + +// GetMigrations noop +func (*NoopDVOStorage) GetMigrations() []migration.Migration { + return nil +} + +// GetDBDriverType noop +func (*NoopDVOStorage) GetDBDriverType() types.DBDriver { + return types.DBDriver(-1) +} + +// GetConnection noop +func (*NoopDVOStorage) GetConnection() *sql.DB { + return nil +} + +// GetMaxVersion noop +func (*NoopDVOStorage) GetMaxVersion() migration.Version { + return migration.Version(0) +} + +// GetDBSchema noop +func (*NoopDVOStorage) GetDBSchema() migration.Schema { + return migration.Schema("") +} + +// MigrateToLatest noop +func (*NoopDVOStorage) MigrateToLatest() error { + return nil +} + +// ReportsCount noop +func (*NoopDVOStorage) ReportsCount() (int, error) { + return 0, nil +} + +// WriteReportForCluster noop +func (*NoopDVOStorage) WriteReportForCluster( + types.OrgID, types.ClusterName, types.ClusterReport, []types.WorkloadRecommendation, time.Time, time.Time, time.Time, + types.RequestID, +) error { + return nil +} + +// ReadWorkloadsForOrganization noop +func (*NoopDVOStorage) ReadWorkloadsForOrganization(types.OrgID) ([]types.DVOReport, error) { + return nil, nil +} + +// ReadWorkloadsForClusterAndNamespace noop +func (*NoopDVOStorage) ReadWorkloadsForClusterAndNamespace( + types.OrgID, + types.ClusterName, + string, +) (types.DVOReport, error) { + return types.DVOReport{}, nil +} + +// DeleteReportsForOrg noop +func (*NoopDVOStorage) DeleteReportsForOrg(types.OrgID) error { + return nil +} diff --git a/storage/noop_dvo_recommendations_storage_test.go 
b/storage/noop_dvo_recommendations_storage_test.go new file mode 100644 index 000000000..d364c8d47 --- /dev/null +++ b/storage/noop_dvo_recommendations_storage_test.go @@ -0,0 +1,42 @@ +// Copyright 2020, 2021, 2022, 2023 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage_test + +import ( + "testing" + "time" + + "github.com/RedHatInsights/insights-results-aggregator/storage" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +// Don't decrease code coverage by non-functional and not covered code. + +// TestDVONoopStorageEmptyMethods1 calls empty methods that just needs to be +// defined in order for NoopStorage to satisfy Storage interface. 
+func TestDVONoopStorageEmptyMethods(_ *testing.T) { + noopStorage := storage.NoopDVOStorage{} + orgID := types.OrgID(1) + + _ = noopStorage.Init() + _ = noopStorage.Close() + _, _ = noopStorage.ReportsCount() + _ = noopStorage.DeleteReportsForOrg(orgID) + _ = noopStorage.MigrateToLatest() + _ = noopStorage.GetConnection() + _ = noopStorage.GetDBDriverType() + + _ = noopStorage.WriteReportForCluster(0, "", "", validDVORecommendation, time.Now(), time.Now(), time.Now(), "") +} diff --git a/storage/noop_storage.go b/storage/noop_ocp_recommendations_storage.go similarity index 64% rename from storage/noop_storage.go rename to storage/noop_ocp_recommendations_storage.go index 562502ec6..23ffa679c 100644 --- a/storage/noop_storage.go +++ b/storage/noop_ocp_recommendations_storage.go @@ -18,68 +18,83 @@ import ( "database/sql" "time" - "github.com/RedHatInsights/insights-content-service/content" "github.com/Shopify/sarama" ctypes "github.com/RedHatInsights/insights-results-types" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) -// NoopStorage represents a storage which does nothing (for benchmarking without a storage) -type NoopStorage struct{} +// NoopOCPStorage represents a storage which does nothing (for benchmarking without a storage) +type NoopOCPStorage struct{} // Init noop -func (*NoopStorage) Init() error { +func (*NoopOCPStorage) Init() error { return nil } // Close noop -func (*NoopStorage) Close() error { +func (*NoopOCPStorage) Close() error { return nil } +// GetMigrations noop +func (*NoopOCPStorage) GetMigrations() []migration.Migration { + return nil +} + +// GetDBSchema noop +func (*NoopOCPStorage) GetDBSchema() migration.Schema { + return migration.Schema("") +} + +// GetMaxVersion noop +func (*NoopOCPStorage) GetMaxVersion() migration.Version { + return migration.Version(0) +} + // ListOfOrgs noop -func (*NoopStorage) ListOfOrgs() ([]types.OrgID, error) { +func 
(*NoopOCPStorage) ListOfOrgs() ([]types.OrgID, error) { return nil, nil } // ListOfClustersForOrg noop -func (*NoopStorage) ListOfClustersForOrg(types.OrgID, time.Time) ([]types.ClusterName, error) { +func (*NoopOCPStorage) ListOfClustersForOrg(types.OrgID, time.Time) ([]types.ClusterName, error) { return nil, nil } // ReadReportForCluster noop -func (*NoopStorage) ReadReportForCluster(types.OrgID, types.ClusterName) ([]types.RuleOnReport, types.Timestamp, types.Timestamp, types.Timestamp, error) { +func (*NoopOCPStorage) ReadReportForCluster(types.OrgID, types.ClusterName) ([]types.RuleOnReport, types.Timestamp, types.Timestamp, types.Timestamp, error) { return []types.RuleOnReport{}, "", "", "", nil } // ReadReportInfoForCluster noop -func (*NoopStorage) ReadReportInfoForCluster(types.OrgID, types.ClusterName) (types.Version, error) { +func (*NoopOCPStorage) ReadReportInfoForCluster(types.OrgID, types.ClusterName) (types.Version, error) { return "", nil } // ReadClusterVersionsForClusterList noop -func (*NoopStorage) ReadClusterVersionsForClusterList( +func (*NoopOCPStorage) ReadClusterVersionsForClusterList( types.OrgID, []string, ) (map[types.ClusterName]types.Version, error) { return nil, nil } // ReadSingleRuleTemplateData noop -func (*NoopStorage) ReadSingleRuleTemplateData(types.OrgID, types.ClusterName, types.RuleID, types.ErrorKey) (interface{}, error) { +func (*NoopOCPStorage) ReadSingleRuleTemplateData(types.OrgID, types.ClusterName, types.RuleID, types.ErrorKey) (interface{}, error) { return "", nil } // ReadReportForClusterByClusterName noop -func (*NoopStorage) ReadReportForClusterByClusterName( +func (*NoopOCPStorage) ReadReportForClusterByClusterName( types.ClusterName, ) ([]types.RuleOnReport, types.Timestamp, error) { return []types.RuleOnReport{}, "", nil } // WriteReportForCluster noop -func (*NoopStorage) WriteReportForCluster( +func (*NoopOCPStorage) WriteReportForCluster( types.OrgID, types.ClusterName, types.ClusterReport, 
[]types.ReportItem, time.Time, time.Time, time.Time, types.RequestID, ) error { @@ -87,7 +102,7 @@ func (*NoopStorage) WriteReportForCluster( } // WriteReportInfoForCluster noop -func (*NoopStorage) WriteReportInfoForCluster( +func (*NoopOCPStorage) WriteReportInfoForCluster( _ types.OrgID, _ types.ClusterName, _ []types.InfoItem, @@ -97,115 +112,110 @@ func (*NoopStorage) WriteReportInfoForCluster( } // WriteRecommendationsForCluster noop -func (*NoopStorage) WriteRecommendationsForCluster( +func (*NoopOCPStorage) WriteRecommendationsForCluster( types.OrgID, types.ClusterName, types.ClusterReport, types.Timestamp, ) error { return nil } // ReportsCount noop -func (*NoopStorage) ReportsCount() (int, error) { +func (*NoopOCPStorage) ReportsCount() (int, error) { return 0, nil } // VoteOnRule noop -func (*NoopStorage) VoteOnRule(types.ClusterName, types.RuleID, types.ErrorKey, types.OrgID, types.UserID, types.UserVote, string) error { +func (*NoopOCPStorage) VoteOnRule(types.ClusterName, types.RuleID, types.ErrorKey, types.OrgID, types.UserID, types.UserVote, string) error { return nil } // AddOrUpdateFeedbackOnRule noop -func (*NoopStorage) AddOrUpdateFeedbackOnRule( +func (*NoopOCPStorage) AddOrUpdateFeedbackOnRule( types.ClusterName, types.RuleID, types.ErrorKey, types.OrgID, types.UserID, string, ) error { return nil } // AddFeedbackOnRuleDisable noop -func (*NoopStorage) AddFeedbackOnRuleDisable( +func (*NoopOCPStorage) AddFeedbackOnRuleDisable( types.ClusterName, types.RuleID, types.ErrorKey, types.OrgID, types.UserID, string, ) error { return nil } // GetUserFeedbackOnRuleDisable noop -func (*NoopStorage) GetUserFeedbackOnRuleDisable( +func (*NoopOCPStorage) GetUserFeedbackOnRuleDisable( types.ClusterName, types.RuleID, types.ErrorKey, types.UserID, ) (*UserFeedbackOnRule, error) { return nil, nil } // GetUserFeedbackOnRule noop -func (*NoopStorage) GetUserFeedbackOnRule( +func (*NoopOCPStorage) GetUserFeedbackOnRule( types.ClusterName, types.RuleID, 
types.ErrorKey, types.UserID, ) (*UserFeedbackOnRule, error) { return nil, nil } // DeleteReportsForOrg noop -func (*NoopStorage) DeleteReportsForOrg(types.OrgID) error { +func (*NoopOCPStorage) DeleteReportsForOrg(types.OrgID) error { return nil } // DeleteReportsForCluster noop -func (*NoopStorage) DeleteReportsForCluster(types.ClusterName) error { - return nil -} - -// LoadRuleContent noop -func (*NoopStorage) LoadRuleContent(content.RuleContentDirectory) error { +func (*NoopOCPStorage) DeleteReportsForCluster(types.ClusterName) error { return nil } // GetRuleByID noop -func (*NoopStorage) GetRuleByID(types.RuleID) (*types.Rule, error) { +func (*NoopOCPStorage) GetRuleByID(types.RuleID) (*types.Rule, error) { return nil, nil } // GetOrgIDByClusterID noop -func (*NoopStorage) GetOrgIDByClusterID(types.ClusterName) (types.OrgID, error) { +func (*NoopOCPStorage) GetOrgIDByClusterID(types.ClusterName) (types.OrgID, error) { return 0, nil } // CreateRule noop -func (*NoopStorage) CreateRule(types.Rule) error { +func (*NoopOCPStorage) CreateRule(types.Rule) error { return nil } // DeleteRule noop -func (*NoopStorage) DeleteRule(types.RuleID) error { +func (*NoopOCPStorage) DeleteRule(types.RuleID) error { return nil } // CreateRuleErrorKey noop -func (*NoopStorage) CreateRuleErrorKey(types.RuleErrorKey) error { +func (*NoopOCPStorage) CreateRuleErrorKey(types.RuleErrorKey) error { return nil } // DeleteRuleErrorKey noop -func (*NoopStorage) DeleteRuleErrorKey(types.RuleID, types.ErrorKey) error { +func (*NoopOCPStorage) DeleteRuleErrorKey(types.RuleID, types.ErrorKey) error { return nil } // WriteConsumerError noop -func (*NoopStorage) WriteConsumerError(*sarama.ConsumerMessage, error) error { +func (*NoopOCPStorage) WriteConsumerError(*sarama.ConsumerMessage, error) error { return nil } // ToggleRuleForCluster noop -func (*NoopStorage) ToggleRuleForCluster( +func (*NoopOCPStorage) ToggleRuleForCluster( types.ClusterName, types.RuleID, types.ErrorKey, types.OrgID, 
RuleToggle, ) error { return nil } // DeleteFromRuleClusterToggle noop -func (*NoopStorage) DeleteFromRuleClusterToggle( +func (*NoopOCPStorage) DeleteFromRuleClusterToggle( types.ClusterName, types.RuleID) error { return nil } // GetFromClusterRuleToggle noop -func (*NoopStorage) GetFromClusterRuleToggle( +func (*NoopOCPStorage) GetFromClusterRuleToggle( types.ClusterName, types.RuleID, ) (*ClusterRuleToggle, error) { @@ -213,7 +223,7 @@ func (*NoopStorage) GetFromClusterRuleToggle( } // GetTogglesForRules noop -func (*NoopStorage) GetTogglesForRules( +func (*NoopOCPStorage) GetTogglesForRules( types.ClusterName, []types.RuleOnReport, types.OrgID, @@ -222,7 +232,7 @@ func (*NoopStorage) GetTogglesForRules( } // GetUserFeedbackOnRules noop -func (*NoopStorage) GetUserFeedbackOnRules( +func (*NoopOCPStorage) GetUserFeedbackOnRules( types.ClusterName, []types.RuleOnReport, types.UserID, @@ -231,50 +241,50 @@ func (*NoopStorage) GetUserFeedbackOnRules( } // GetRuleWithContent noop -func (*NoopStorage) GetRuleWithContent( +func (*NoopOCPStorage) GetRuleWithContent( types.RuleID, types.ErrorKey, ) (*types.RuleWithContent, error) { return nil, nil } // GetUserDisableFeedbackOnRules noop -func (*NoopStorage) GetUserDisableFeedbackOnRules( +func (*NoopOCPStorage) GetUserDisableFeedbackOnRules( types.ClusterName, []types.RuleOnReport, types.UserID, ) (map[types.RuleID]UserFeedbackOnRule, error) { return nil, nil } // DoesClusterExist noop -func (*NoopStorage) DoesClusterExist(types.ClusterName) (bool, error) { +func (*NoopOCPStorage) DoesClusterExist(types.ClusterName) (bool, error) { return false, nil } // ReadOrgIDsForClusters read organization IDs for given list of cluster names. -func (*NoopStorage) ReadOrgIDsForClusters(_ []types.ClusterName) ([]types.OrgID, error) { +func (*NoopOCPStorage) ReadOrgIDsForClusters(_ []types.ClusterName) ([]types.OrgID, error) { return nil, nil } // ReadReportsForClusters function reads reports for given list of cluster // names. 
-func (*NoopStorage) ReadReportsForClusters(_ []types.ClusterName) (map[types.ClusterName]types.ClusterReport, error) { +func (*NoopOCPStorage) ReadReportsForClusters(_ []types.ClusterName) (map[types.ClusterName]types.ClusterReport, error) { return nil, nil } // ListOfDisabledRules function returns list of all rules disabled from a // specified account (noop). -func (*NoopStorage) ListOfDisabledRules(_ types.OrgID) ([]ctypes.DisabledRule, error) { +func (*NoopOCPStorage) ListOfDisabledRules(_ types.OrgID) ([]ctypes.DisabledRule, error) { return nil, nil } // ListOfReasons function returns list of reasons for all rules disabled from a // specified account (noop). -func (*NoopStorage) ListOfReasons(_ types.UserID) ([]DisabledRuleReason, error) { +func (*NoopOCPStorage) ListOfReasons(_ types.UserID) ([]DisabledRuleReason, error) { return nil, nil } // ListOfDisabledClusters function returns list of all clusters disabled for a rule from a // specified account (noop). -func (*NoopStorage) ListOfDisabledClusters( +func (*NoopOCPStorage) ListOfDisabledClusters( _ types.OrgID, _ types.RuleID, _ types.ErrorKey, @@ -284,7 +294,7 @@ func (*NoopStorage) ListOfDisabledClusters( // ListOfDisabledRulesForClusters function returns list of disabled rules for given clusters from a // specified account (noop). 
-func (*NoopStorage) ListOfDisabledRulesForClusters( +func (*NoopOCPStorage) ListOfDisabledRulesForClusters( _ []string, _ types.OrgID, ) ([]ctypes.DisabledRule, error) { @@ -292,7 +302,7 @@ func (*NoopStorage) ListOfDisabledRulesForClusters( } // RateOnRule function stores the vote (rating) given by an user to a rule+error key -func (*NoopStorage) RateOnRule( +func (*NoopOCPStorage) RateOnRule( types.OrgID, types.RuleID, types.ErrorKey, @@ -302,7 +312,7 @@ func (*NoopStorage) RateOnRule( } // GetRuleRating retrieves rating for given rule and user -func (*NoopStorage) GetRuleRating( +func (*NoopOCPStorage) GetRuleRating( _ types.OrgID, _ types.RuleSelector, ) ( @@ -314,7 +324,7 @@ func (*NoopStorage) GetRuleRating( // DisableRuleSystemWide disables the selected rule for all clusters visible to // given user -func (*NoopStorage) DisableRuleSystemWide( +func (*NoopOCPStorage) DisableRuleSystemWide( _ types.OrgID, _ types.RuleID, _ types.ErrorKey, _ string, ) error { @@ -323,14 +333,14 @@ func (*NoopStorage) DisableRuleSystemWide( // EnableRuleSystemWide enables the selected rule for all clusters visible to // given user -func (*NoopStorage) EnableRuleSystemWide( +func (*NoopOCPStorage) EnableRuleSystemWide( _ types.OrgID, _ types.RuleID, _ types.ErrorKey, ) error { return nil } // UpdateDisabledRuleJustification change justification for already disabled rule -func (*NoopStorage) UpdateDisabledRuleJustification( +func (*NoopOCPStorage) UpdateDisabledRuleJustification( _ types.OrgID, _ types.RuleID, _ types.ErrorKey, @@ -340,7 +350,7 @@ func (*NoopStorage) UpdateDisabledRuleJustification( } // ReadDisabledRule function returns disabled rule (if disabled) from database -func (*NoopStorage) ReadDisabledRule( +func (*NoopOCPStorage) ReadDisabledRule( _ types.OrgID, _ types.RuleID, _ types.ErrorKey, ) (ctypes.SystemWideRuleDisable, bool, error) { return ctypes.SystemWideRuleDisable{}, true, nil @@ -348,14 +358,14 @@ func (*NoopStorage) ReadDisabledRule( // 
ListOfSystemWideDisabledRules function returns list of all rules that have been // disabled for all clusters by given user -func (*NoopStorage) ListOfSystemWideDisabledRules( +func (*NoopOCPStorage) ListOfSystemWideDisabledRules( _ types.OrgID, ) ([]ctypes.SystemWideRuleDisable, error) { return nil, nil } // ReadRecommendationsForClusters reads all recommendations from recommendation table for given organization -func (*NoopStorage) ReadRecommendationsForClusters( +func (*NoopOCPStorage) ReadRecommendationsForClusters( _ []string, _ types.OrgID, ) (ctypes.RecommendationImpactedClusters, error) { @@ -364,14 +374,14 @@ func (*NoopStorage) ReadRecommendationsForClusters( // ListOfClustersForOrgSpecificRule returns list of all clusters for // given organization that are affected by given rule -func (*NoopStorage) ListOfClustersForOrgSpecificRule( +func (*NoopOCPStorage) ListOfClustersForOrgSpecificRule( _ types.OrgID, _ types.RuleSelector, _ []string, ) ([]ctypes.HittingClustersData, error) { return nil, nil } // ReadClusterListRecommendations retrieves cluster IDs and a list of hitting rules for each one -func (*NoopStorage) ReadClusterListRecommendations( +func (*NoopOCPStorage) ReadClusterListRecommendations( _ []string, _ types.OrgID, ) (ctypes.ClusterRecommendationMap, error) { return nil, nil @@ -379,21 +389,21 @@ func (*NoopStorage) ReadClusterListRecommendations( // MigrateToLatest migrates the database to the latest available // migration version. This must be done before an Init() call. 
-func (*NoopStorage) MigrateToLatest() error { +func (*NoopOCPStorage) MigrateToLatest() error { return nil } // GetConnection returns db connection(useful for testing) -func (*NoopStorage) GetConnection() *sql.DB { +func (*NoopOCPStorage) GetConnection() *sql.DB { return nil } // PrintRuleDisableDebugInfo is a temporary helper function used to print form // cluster rule toggle related tables -func (*NoopStorage) PrintRuleDisableDebugInfo() { +func (*NoopOCPStorage) PrintRuleDisableDebugInfo() { } // GetDBDriverType returns db driver type -func (*NoopStorage) GetDBDriverType() types.DBDriver { +func (*NoopOCPStorage) GetDBDriverType() types.DBDriver { return types.DBDriverGeneral } diff --git a/storage/noop_storage_test.go b/storage/noop_ocp_recommendations_storage_test.go similarity index 95% rename from storage/noop_storage_test.go rename to storage/noop_ocp_recommendations_storage_test.go index 6a6971b75..43a9f00cc 100644 --- a/storage/noop_storage_test.go +++ b/storage/noop_ocp_recommendations_storage_test.go @@ -18,8 +18,6 @@ import ( "testing" "time" - "github.com/RedHatInsights/insights-content-service/content" - "github.com/RedHatInsights/insights-results-aggregator/storage" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -29,7 +27,7 @@ import ( // TestNoopStorageEmptyMethods1 calls empty methods that just needs to be // defined in order for NoopStorage to satisfy Storage interface. func TestNoopStorageEmptyMethods1(_ *testing.T) { - noopStorage := storage.NoopStorage{} + noopStorage := storage.NoopOCPStorage{} orgID := types.OrgID(1) clusterName := types.ClusterName("") @@ -59,7 +57,7 @@ func TestNoopStorageEmptyMethods1(_ *testing.T) { // TestNoopStorageEmptyMethods2 calls empty methods that just needs to be // defined in order for NoopStorage to satisfy Storage interface. 
func TestNoopStorageEmptyMethods2(_ *testing.T) { - noopStorage := storage.NoopStorage{} + noopStorage := storage.NoopOCPStorage{} orgID := types.OrgID(1) clusterName := types.ClusterName("") ruleID := types.RuleID("") @@ -73,7 +71,6 @@ func TestNoopStorageEmptyMethods2(_ *testing.T) { _ = noopStorage.AddFeedbackOnRuleDisable(clusterName, ruleID, errorKey, orgID, userID, "") _, _ = noopStorage.GetUserFeedbackOnRuleDisable(clusterName, ruleID, errorKey, userID) _, _ = noopStorage.GetUserFeedbackOnRule(clusterName, ruleID, errorKey, userID) - _ = noopStorage.LoadRuleContent(content.RuleContentDirectory{}) _, _ = noopStorage.GetRuleByID(ruleID) _, _ = noopStorage.GetOrgIDByClusterID(clusterName) _, _ = noopStorage.ListOfSystemWideDisabledRules(orgID) @@ -88,7 +85,7 @@ func TestNoopStorageEmptyMethods2(_ *testing.T) { // TestNoopStorageEmptyMethods3 calls empty methods that just needs to be // defined in order for NoopStorage to satisfy Storage interface. func TestNoopStorageEmptyMethods3(_ *testing.T) { - noopStorage := storage.NoopStorage{} + noopStorage := storage.NoopOCPStorage{} orgID := types.OrgID(1) clusterName := types.ClusterName("") rule := types.Rule{} diff --git a/storage/ocp_recommendations_storage.go b/storage/ocp_recommendations_storage.go new file mode 100644 index 000000000..1c4114ea0 --- /dev/null +++ b/storage/ocp_recommendations_storage.go @@ -0,0 +1,1528 @@ +/* +Copyright © 2020, 2021, 2022, 2023 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +*/ + +// Package storage contains an implementation of interface between Go code and +// (almost any) SQL database like PostgreSQL or MariaDB. An implementation +// named DBStorage is constructed using the function 'New' and it is mandatory to +// call 'Close' for any opened connection to the database. The storage might be +// initialized by 'Init' method if database schema is empty. +// +// It is possible to configure connection to selected database by using Configuration +// structure. Currently, that structure contains two configurable parameter: +// +// Driver - a SQL driver, like "pq", "pgx", etc. +// DataSource - specification of data source. The content of this parameter depends on the database used. +package storage + +import ( + "database/sql" + sql_driver "database/sql/driver" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/Shopify/sarama" + "github.com/lib/pq" // PostgreSQL database driver + "github.com/rs/zerolog/log" + + "github.com/RedHatInsights/insights-operator-utils/redis" + ctypes "github.com/RedHatInsights/insights-results-types" + + "github.com/RedHatInsights/insights-results-aggregator/metrics" + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/migration/ocpmigrations" + + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +// OCPRecommendationsStorage represents an interface to almost any database or storage system +type OCPRecommendationsStorage interface { + Init() error + Close() error + ListOfOrgs() ([]types.OrgID, error) + ListOfClustersForOrg( + orgID types.OrgID, timeLimit time.Time) ([]types.ClusterName, error, + ) + ListOfClustersForOrgSpecificRule( + orgID types.OrgID, ruleID types.RuleSelector, activeClusters []string, + ) ([]ctypes.HittingClustersData, error) + ReadReportForCluster( + orgID types.OrgID, clusterName types.ClusterName) ( + []types.RuleOnReport, 
types.Timestamp, types.Timestamp, types.Timestamp, error, + ) + ReadReportInfoForCluster( + types.OrgID, types.ClusterName) ( + types.Version, error, + ) + ReadClusterVersionsForClusterList( + types.OrgID, []string, + ) (map[types.ClusterName]types.Version, error) + ReadReportsForClusters( + clusterNames []types.ClusterName) (map[types.ClusterName]types.ClusterReport, error) + ReadOrgIDsForClusters( + clusterNames []types.ClusterName) ([]types.OrgID, error) + ReadSingleRuleTemplateData( + orgID types.OrgID, clusterName types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, + ) (interface{}, error) + ReadReportForClusterByClusterName(clusterName types.ClusterName) ([]types.RuleOnReport, types.Timestamp, error) + WriteReportForCluster( + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + rules []types.ReportItem, + collectedAtTime time.Time, + gatheredAtTime time.Time, + storedAtTime time.Time, + requestID types.RequestID, + ) error + WriteReportInfoForCluster( + types.OrgID, + types.ClusterName, + []types.InfoItem, + time.Time, + ) error + WriteRecommendationsForCluster( + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + creationTime types.Timestamp, + ) error + ReportsCount() (int, error) + VoteOnRule( + clusterID types.ClusterName, + ruleID types.RuleID, + errorKey types.ErrorKey, + orgID types.OrgID, + userID types.UserID, + userVote types.UserVote, + voteMessage string, + ) error + AddOrUpdateFeedbackOnRule( + clusterID types.ClusterName, + ruleID types.RuleID, + errorKey types.ErrorKey, + orgID types.OrgID, + userID types.UserID, + message string, + ) error + AddFeedbackOnRuleDisable( + clusterID types.ClusterName, + ruleID types.RuleID, + errorKey types.ErrorKey, + orgID types.OrgID, + userID types.UserID, + message string, + ) error + GetUserFeedbackOnRule( + clusterID types.ClusterName, + ruleID types.RuleID, + errorKey types.ErrorKey, + userID types.UserID, + ) 
(*UserFeedbackOnRule, error) + GetUserFeedbackOnRuleDisable( + clusterID types.ClusterName, + ruleID types.RuleID, + errorKey types.ErrorKey, + userID types.UserID, + ) (*UserFeedbackOnRule, error) + DeleteReportsForOrg(orgID types.OrgID) error + DeleteReportsForCluster(clusterName types.ClusterName) error + ToggleRuleForCluster( + clusterID types.ClusterName, + ruleID types.RuleID, + errorKey types.ErrorKey, + orgID types.OrgID, + ruleToggle RuleToggle, + ) error + GetFromClusterRuleToggle( + types.ClusterName, + types.RuleID, + ) (*ClusterRuleToggle, error) + GetTogglesForRules( + clusterID types.ClusterName, + rulesReport []types.RuleOnReport, + orgID types.OrgID, + ) (map[types.RuleID]bool, error) + DeleteFromRuleClusterToggle( + clusterID types.ClusterName, + ruleID types.RuleID, + ) error + GetOrgIDByClusterID(cluster types.ClusterName) (types.OrgID, error) + WriteConsumerError(msg *sarama.ConsumerMessage, consumerErr error) error + GetUserFeedbackOnRules( + clusterID types.ClusterName, + rulesReport []types.RuleOnReport, + userID types.UserID, + ) (map[types.RuleID]types.UserVote, error) + GetUserDisableFeedbackOnRules( + clusterID types.ClusterName, + rulesReport []types.RuleOnReport, + userID types.UserID, + ) (map[types.RuleID]UserFeedbackOnRule, error) + DoesClusterExist(clusterID types.ClusterName) (bool, error) + ListOfDisabledRules(orgID types.OrgID) ([]ctypes.DisabledRule, error) + ListOfReasons(userID types.UserID) ([]DisabledRuleReason, error) + ListOfDisabledRulesForClusters( + clusterList []string, + orgID types.OrgID, + ) ([]ctypes.DisabledRule, error) + ListOfDisabledClusters( + orgID types.OrgID, + ruleID types.RuleID, + errorKey types.ErrorKey, + ) ([]ctypes.DisabledClusterInfo, error) + RateOnRule( + types.OrgID, + types.RuleID, + types.ErrorKey, + types.UserVote, + ) error + GetRuleRating( + types.OrgID, + types.RuleSelector, + ) (types.RuleRating, error) + DisableRuleSystemWide( + orgID types.OrgID, ruleID types.RuleID, + errorKey 
types.ErrorKey, justification string, + ) error + EnableRuleSystemWide( + orgID types.OrgID, + ruleID types.RuleID, + errorKey types.ErrorKey, + ) error + UpdateDisabledRuleJustification( + orgID types.OrgID, + ruleID types.RuleID, + errorKey types.ErrorKey, + justification string, + ) error + ReadDisabledRule( + orgID types.OrgID, ruleID types.RuleID, errorKey types.ErrorKey, + ) (ctypes.SystemWideRuleDisable, bool, error) + ListOfSystemWideDisabledRules( + orgID types.OrgID, + ) ([]ctypes.SystemWideRuleDisable, error) + ReadRecommendationsForClusters([]string, types.OrgID) (ctypes.RecommendationImpactedClusters, error) + ReadClusterListRecommendations(clusterList []string, orgID types.OrgID) ( + ctypes.ClusterRecommendationMap, error, + ) + MigrateToLatest() error + GetConnection() *sql.DB + PrintRuleDisableDebugInfo() + GetDBDriverType() types.DBDriver + GetMigrations() []migration.Migration + GetDBSchema() migration.Schema + GetMaxVersion() migration.Version +} + +const ( + // ReportSuffix is used to strip away .report suffix from rule module names + ReportSuffix = ".report" + + // ocpDBSchema uses the default public schema in all queries/migrations and all environments + ocpDBSchema = "public" +) + +// OCPRecommendationsDBStorage is an implementation of Storage interface that use selected SQL like database +// like PostgreSQL, MariaDB, RDS etc. That implementation is based on the standard +// sql package. It is possible to configure connection via Configuration structure. +// SQLQueriesLog is log for sql queries, default is nil which means nothing is logged +type OCPRecommendationsDBStorage struct { + connection *sql.DB + dbDriverType types.DBDriver + // clusterLastCheckedDict is a dictionary of timestamps when the clusters were last checked. 
+ clustersLastChecked map[types.ClusterName]time.Time +} + +// NewOCPRecommendationsStorage function creates and initializes a new instance of Storage interface +func NewOCPRecommendationsStorage(configuration Configuration) (OCPRecommendationsStorage, error) { + switch configuration.Type { + case types.SQLStorage: + log.Info().Str("OCP storage type", configuration.Type).Send() + return newSQLStorage(configuration) + case types.RedisStorage: + log.Info().Str("Redis storage type", configuration.Type).Send() + return newRedisStorage(configuration) + case types.NoopStorage: + return newNoopOCPStorage(configuration) + default: + // error to be thrown + err := fmt.Errorf("Unknown storage type '%s'", configuration.Type) + log.Error().Err(err).Msg("Init failure") + return nil, err + } +} + +// newNoopOCPStorage function creates and initializes a new instance of Noop storage +func newNoopOCPStorage(_ Configuration) (OCPRecommendationsStorage, error) { + return &NoopOCPStorage{}, nil +} + +// newRedisStorage function creates and initializes a new instance of Redis storage +func newRedisStorage(configuration Configuration) (OCPRecommendationsStorage, error) { + redisCfg := configuration.RedisConfiguration + log.Info(). + Str("Endpoint", redisCfg.RedisEndpoint). + Int("Database index", redisCfg.RedisDatabase). 
+ Msg("Making connection to Redis storage") + + // pass for unit tests + if redisCfg.RedisEndpoint == "" { + return &RedisStorage{}, nil + } + + client, err := redis.CreateRedisClient( + redisCfg.RedisEndpoint, + redisCfg.RedisDatabase, + redisCfg.RedisPassword, + redisCfg.RedisTimeoutSeconds, + ) + // check for init error + if err != nil { + log.Error().Err(err).Msg("Error constructing Redis client") + return nil, err + } + + log.Info().Msg("Redis client has been initialized") + + redisStorage := &RedisStorage{ + Client: redis.Client{Connection: client}, + } + + err = redisStorage.Init() + if err != nil { + log.Error().Err(err).Msg("Error initializing Redis client") + return nil, err + } + return redisStorage, nil +} + +// newSQLStorage function creates and initializes a new instance of DB storage +func newSQLStorage(configuration Configuration) (OCPRecommendationsStorage, error) { + driverType, driverName, dataSource, err := initAndGetDriver(configuration) + if err != nil { + return nil, err + } + + log.Info().Msgf( + "Making connection to data storage, driver=%s", + driverName, + ) + + connection, err := sql.Open(driverName, dataSource) + if err != nil { + log.Error().Err(err).Msg("Cannot connect to data storage") + return nil, err + } + + return NewOCPRecommendationsFromConnection(connection, driverType), nil +} + +// NewOCPRecommendationsFromConnection function creates and initializes a new instance of Storage interface from prepared connection +func NewOCPRecommendationsFromConnection(connection *sql.DB, dbDriverType types.DBDriver) *OCPRecommendationsDBStorage { + return &OCPRecommendationsDBStorage{ + connection: connection, + dbDriverType: dbDriverType, + clustersLastChecked: map[types.ClusterName]time.Time{}, + } +} + +// initAndGetDriver initializes driver(with logs if logSQLQueries is true), +// checks if it's supported and returns driver type, driver name, dataSource and error +func initAndGetDriver(configuration Configuration) (driverType 
types.DBDriver, driverName, dataSource string, err error) { + var driver sql_driver.Driver + driverName = configuration.Driver + + switch driverName { + case "postgres": + driverType = types.DBDriverPostgres + driver = &pq.Driver{} + dataSource = fmt.Sprintf( + "postgresql://%v:%v@%v:%v/%v?%v", + configuration.PGUsername, + configuration.PGPassword, + configuration.PGHost, + configuration.PGPort, + configuration.PGDBName, + configuration.PGParams, + ) + default: + err = fmt.Errorf("driver %v is not supported", driverName) + return + } + + if configuration.LogSQLQueries { + driverName = InitSQLDriverWithLogs(driver, driverName) + } + + return +} + +// GetMigrations returns a list of database migrations related to OCP recommendation tables +func (storage OCPRecommendationsDBStorage) GetMigrations() []migration.Migration { + return ocpmigrations.UsableOCPMigrations +} + +// GetDBSchema returns the schema name to be used in queries +func (storage OCPRecommendationsDBStorage) GetDBSchema() migration.Schema { + return migration.Schema(ocpDBSchema) +} + +// GetMaxVersion returns the highest available migration version. +// The DB version cannot be set to a value higher than this. +// This value is equivalent to the length of the list of available migrations. +func (storage OCPRecommendationsDBStorage) GetMaxVersion() migration.Version { + return migration.Version(len(storage.GetMigrations())) +} + +// MigrateToLatest migrates the database to the latest available +// migration version. This must be done before an Init() call. 
+func (storage OCPRecommendationsDBStorage) MigrateToLatest() error { + dbConn, dbSchema := storage.GetConnection(), storage.GetDBSchema() + + if err := migration.InitInfoTable(dbConn, dbSchema); err != nil { + return err + } + + return migration.SetDBVersion( + dbConn, + storage.dbDriverType, + dbSchema, + storage.GetMaxVersion(), + storage.GetMigrations(), + ) +} + +// Init performs all database initialization +// tasks necessary for further service operation. +func (storage OCPRecommendationsDBStorage) Init() error { + // Read clusterName:LastChecked dictionary from DB. + rows, err := storage.connection.Query("SELECT cluster, last_checked_at FROM report;") + if err != nil { + return err + } + + log.Debug().Msg("executing last_checked_at query") + for rows.Next() { + var ( + clusterName types.ClusterName + lastChecked sql.NullTime + ) + + if err := rows.Scan(&clusterName, &lastChecked); err != nil { + if closeErr := rows.Close(); closeErr != nil { + log.Error().Err(closeErr).Msg("Unable to close the DB rows handle") + } + return err + } + + storage.clustersLastChecked[clusterName] = lastChecked.Time + } + + // Not using defer to close the rows here to: + // - make errcheck happy (it doesn't like ignoring returned errors), + // - return a possible error returned by the Close method. + return rows.Close() +} + +// Close method closes the connection to database. Needs to be called at the end of application lifecycle. +func (storage OCPRecommendationsDBStorage) Close() error { + log.Info().Msg("Closing connection to data storage") + if storage.connection != nil { + err := storage.connection.Close() + if err != nil { + log.Error().Err(err).Msg("Can not close connection to data storage") + return err + } + } + return nil +} + +// Report represents one (latest) cluster report. 
+// +// Org: organization ID +// Name: cluster GUID in the following format: +// c8590f31-e97e-4b85-b506-c45ce1911a12 +type Report struct { + Org types.OrgID `json:"org"` + Name types.ClusterName `json:"cluster"` + Report types.ClusterReport `json:"report"` + ReportedAt types.Timestamp `json:"reported_at"` +} + +func closeRows(rows *sql.Rows) { + _ = rows.Close() +} + +// ListOfOrgs reads list of all organizations that have at least one cluster report +func (storage OCPRecommendationsDBStorage) ListOfOrgs() ([]types.OrgID, error) { + orgs := make([]types.OrgID, 0) + + rows, err := storage.connection.Query("SELECT DISTINCT org_id FROM report ORDER BY org_id;") + err = types.ConvertDBError(err, nil) + if err != nil { + return orgs, err + } + defer closeRows(rows) + + for rows.Next() { + var orgID types.OrgID + + err = rows.Scan(&orgID) + if err == nil { + orgs = append(orgs, orgID) + } else { + log.Error().Err(err).Msg("ListOfOrgID") + } + } + return orgs, nil +} + +// ListOfClustersForOrg reads list of all clusters for given organization +func (storage OCPRecommendationsDBStorage) ListOfClustersForOrg(orgID types.OrgID, timeLimit time.Time) ([]types.ClusterName, error) { + clusters := make([]types.ClusterName, 0) + + q := ` + SELECT cluster + FROM report + WHERE org_id = $1 + AND reported_at >= $2 + ORDER BY cluster; + ` + + rows, err := storage.connection.Query(q, orgID, timeLimit) + + err = types.ConvertDBError(err, orgID) + if err != nil { + return clusters, err + } + defer closeRows(rows) + + for rows.Next() { + var clusterName string + + err = rows.Scan(&clusterName) + if err == nil { + clusters = append(clusters, types.ClusterName(clusterName)) + } else { + log.Error().Err(err).Msg("ListOfClustersForOrg") + } + } + return clusters, nil +} + +// ListOfClustersForOrgSpecificRule returns list of all clusters for given organization that are affected by given rule +func (storage OCPRecommendationsDBStorage) ListOfClustersForOrgSpecificRule( + orgID types.OrgID, + 
ruleID types.RuleSelector, + activeClusters []string) ( + []ctypes.HittingClustersData, error) { + results := make([]ctypes.HittingClustersData, 0) + + var whereClause string + if len(activeClusters) > 0 { + // #nosec G201 + whereClause = fmt.Sprintf(`WHERE org_id = $1 AND rule_id = $2 AND cluster_id IN (%v)`, + inClauseFromSlice(activeClusters)) + } else { + whereClause = `WHERE org_id = $1 AND rule_id = $2` + } + // #nosec G202 + query := `SELECT cluster_id, created_at, impacted_since FROM recommendation ` + whereClause + ` ORDER BY cluster_id;` + + // #nosec G202 + rows, err := storage.connection.Query(query, orgID, ruleID) + + err = types.ConvertDBError(err, orgID) + if err != nil { + return results, err + } + + defer closeRows(rows) + + var ( + clusterName types.ClusterName + lastSeen string + impactedSince string + ) + for rows.Next() { + err = rows.Scan(&clusterName, &lastSeen, &impactedSince) + if err != nil { + log.Error().Err(err).Msg("ListOfClustersForOrgSpecificRule") + } + results = append(results, ctypes.HittingClustersData{ + Cluster: clusterName, + LastSeen: lastSeen, + ImpactedSince: impactedSince, + }) + } + + // This is to ensure 404 when no recommendation is found for the given orgId + selector. 
+ // We can, alternatively, return something like this with a 204 (no content): + // {"data":[],"meta":{"count":0,"component":"test.rule","error_key":"ek"},"status":"not_found"} + if len(results) == 0 { + return results, &types.ItemNotFoundError{ItemID: ruleID} + } + return results, nil +} + +// GetOrgIDByClusterID reads OrgID for specified cluster +func (storage OCPRecommendationsDBStorage) GetOrgIDByClusterID(cluster types.ClusterName) (types.OrgID, error) { + row := storage.connection.QueryRow("SELECT org_id FROM report WHERE cluster = $1 ORDER BY org_id;", cluster) + + var orgID uint64 + err := row.Scan(&orgID) + if err != nil { + log.Error().Err(err).Msg("GetOrgIDByClusterID") + return 0, err + } + return types.OrgID(orgID), nil +} + +// parseTemplateData parses template data and returns a json raw message if it's a json or a string otherwise +func parseTemplateData(templateData []byte) interface{} { + var templateDataJSON json.RawMessage + + err := json.Unmarshal(templateData, &templateDataJSON) + if err != nil { + log.Warn().Err(err).Msgf("unable to parse template data as json") + return templateData + } + + return templateDataJSON +} + +func parseRuleRows(rows *sql.Rows) ([]types.RuleOnReport, error) { + report := make([]types.RuleOnReport, 0) + + for rows.Next() { + var ( + templateDataBytes []byte + ruleFQDN types.RuleID + errorKey types.ErrorKey + createdAt sql.NullTime + ) + + err := rows.Scan(&templateDataBytes, &ruleFQDN, &errorKey, &createdAt) + if err != nil { + log.Error().Err(err).Msg("ReportListForCluster") + return report, err + } + + templateData := parseTemplateData(templateDataBytes) + var createdAtConverted time.Time + if createdAt.Valid { + createdAtConverted = createdAt.Time + } + rule := types.RuleOnReport{ + Module: ruleFQDN, + ErrorKey: errorKey, + TemplateData: templateData, + CreatedAt: types.Timestamp(createdAtConverted.UTC().Format(time.RFC3339)), + } + + report = append(report, rule) + } + + return report, nil +} + +// 
constructInClausule is a helper function to construct `in` clause for SQL +// statement. +func constructInClausule(howMany int) (string, error) { + // construct the `in` clause in SQL query statement + if howMany < 1 { + return "", fmt.Errorf("at least one value needed") + } + inClausule := "$1" + for i := 2; i <= howMany; i++ { + inClausule += fmt.Sprintf(",$%d", i) + } + return inClausule, nil +} + +// argsWithClusterNames is a helper function to construct arguments for SQL +// statement. +func argsWithClusterNames(clusterNames []types.ClusterName) []interface{} { + // prepare arguments + args := make([]interface{}, len(clusterNames)) + + for i, clusterName := range clusterNames { + args[i] = clusterName + } + return args +} + +// inClauseFromSlice is a helper function to construct `in` clause for SQL +// statement from a given slice of items. The received slice must be []string +// or any other type that can be asserted to []string, or else '1=1' will be +// returned, making the IN clause act like a wildcard. +func inClauseFromSlice(slice interface{}) string { + if slice, ok := slice.([]string); ok { + return "'" + strings.Join(slice, `','`) + `'` + } + return "1=1" +} + +/* +func updateRecommendationsMetrics(cluster string, deleted float64, inserted float64) { + metrics.SQLRecommendationsDeletes.WithLabelValues(cluster).Observe(deleted) + metrics.SQLRecommendationsInserts.WithLabelValues(cluster).Observe(inserted) +} +*/ + +// ReadOrgIDsForClusters read organization IDs for given list of cluster names. 
+func (storage OCPRecommendationsDBStorage) ReadOrgIDsForClusters(clusterNames []types.ClusterName) ([]types.OrgID, error) { + // stub for return value + ids := make([]types.OrgID, 0) + + if len(clusterNames) < 1 { + return ids, nil + } + + // prepare arguments + args := argsWithClusterNames(clusterNames) + + // construct the `in` clause in SQL query statement + inClausule, err := constructInClausule(len(clusterNames)) + if err != nil { + log.Error().Err(err).Msg(inClauseError) + return ids, err + } + + // disable "G202 (CWE-89): SQL string concatenation" + // #nosec G202 + query := "SELECT DISTINCT org_id FROM report WHERE cluster in (" + inClausule + ");" + + // select results from the database + // #nosec G202 + rows, err := storage.connection.Query(query, args...) + if err != nil { + log.Error().Err(err).Msg("query to get org ids") + return ids, err + } + + // process results returned from database + for rows.Next() { + var orgID types.OrgID + + err := rows.Scan(&orgID) + if err != nil { + log.Error().Err(err).Msg("read one org id") + return ids, err + } + + ids = append(ids, orgID) + } + + // everything seems ok -> return ids + return ids, nil +} + +// ReadReportsForClusters function reads reports for given list of cluster +// names. 
+func (storage OCPRecommendationsDBStorage) ReadReportsForClusters(clusterNames []types.ClusterName) (map[types.ClusterName]types.ClusterReport, error) { + // stub for return value + reports := make(map[types.ClusterName]types.ClusterReport) + + if len(clusterNames) < 1 { + return reports, nil + } + + // prepare arguments + args := argsWithClusterNames(clusterNames) + + // construct the `in` clause in SQL query statement + inClausule, err := constructInClausule(len(clusterNames)) + if err != nil { + log.Error().Err(err).Msg(inClauseError) + return reports, err + } + + // disable "G202 (CWE-89): SQL string concatenation" + // #nosec G202 + query := "SELECT cluster, report FROM report WHERE cluster in (" + inClausule + ");" + + // select results from the database + // #nosec G202 + rows, err := storage.connection.Query(query, args...) + if err != nil { + return reports, err + } + + // process results returned from database + for rows.Next() { + // convert into requested type + var ( + clusterName types.ClusterName + clusterReport types.ClusterReport + ) + + err := rows.Scan(&clusterName, &clusterReport) + if err != nil { + log.Error().Err(err).Msg("ReadReportsForClusters") + return reports, err + } + + reports[clusterName] = clusterReport + } + + // everything seems ok -> return reports + return reports, nil +} + +// ReadReportForCluster reads result (health status) for selected cluster +func (storage OCPRecommendationsDBStorage) ReadReportForCluster( + orgID types.OrgID, clusterName types.ClusterName, +) ([]types.RuleOnReport, types.Timestamp, types.Timestamp, types.Timestamp, error) { + var lastChecked time.Time + var reportedAt time.Time + var gatheredAtInDB sql.NullTime // to avoid problems + + report := make([]types.RuleOnReport, 0) + + err := storage.connection.QueryRow( + "SELECT last_checked_at, reported_at, gathered_at FROM report WHERE org_id = $1 AND cluster = $2;", + orgID, clusterName, + ).Scan(&lastChecked, &reportedAt, &gatheredAtInDB) + + // convert 
timestamps to string + var lastCheckedStr = types.Timestamp(lastChecked.UTC().Format(time.RFC3339)) + var reportedAtStr = types.Timestamp(reportedAt.UTC().Format(time.RFC3339)) + var gatheredAtStr types.Timestamp + + if gatheredAtInDB.Valid { + gatheredAtStr = types.Timestamp(gatheredAtInDB.Time.UTC().Format(time.RFC3339)) + } else { + gatheredAtStr = "" + } + + err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) + if err != nil { + log.Error().Err(err).Str(clusterKey, string(clusterName)).Msg( + "ReadReportForCluster query from report table error", + ) + return report, lastCheckedStr, reportedAtStr, gatheredAtStr, err + } + + rows, err := storage.connection.Query( + "SELECT template_data, rule_fqdn, error_key, created_at FROM rule_hit WHERE org_id = $1 AND cluster_id = $2;", orgID, clusterName, + ) + + err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) + if err != nil { + log.Error().Err(err).Str(clusterKey, string(clusterName)).Msg( + "ReadReportForCluster query from rule_hit table error", + ) + return report, lastCheckedStr, reportedAtStr, gatheredAtStr, err + } + + report, err = parseRuleRows(rows) + + return report, lastCheckedStr, reportedAtStr, gatheredAtStr, err +} + +// ReadSingleRuleTemplateData reads template data for a single rule +func (storage OCPRecommendationsDBStorage) ReadSingleRuleTemplateData( + orgID types.OrgID, clusterName types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, +) (interface{}, error) { + var templateDataBytes []byte + + err := storage.connection.QueryRow(` + SELECT template_data FROM rule_hit + WHERE org_id = $1 AND cluster_id = $2 AND rule_fqdn = $3 AND error_key = $4; + `, + orgID, + clusterName, + ruleID, + errorKey, + ).Scan(&templateDataBytes) + err = types.ConvertDBError(err, []interface{}{orgID, clusterName, ruleID, errorKey}) + + return parseTemplateData(templateDataBytes), err +} + +// ReadReportForClusterByClusterName reads result (health status) for selected cluster for 
given organization +func (storage OCPRecommendationsDBStorage) ReadReportForClusterByClusterName( + clusterName types.ClusterName, +) ([]types.RuleOnReport, types.Timestamp, error) { + report := make([]types.RuleOnReport, 0) + var lastChecked time.Time + + err := storage.connection.QueryRow( + "SELECT last_checked_at FROM report WHERE cluster = $1;", clusterName, + ).Scan(&lastChecked) + + switch { + case err == sql.ErrNoRows: + return report, "", &types.ItemNotFoundError{ + ItemID: fmt.Sprintf("%v", clusterName), + } + case err != nil: + return report, "", err + } + + rows, err := storage.connection.Query( + "SELECT template_data, rule_fqdn, error_key, created_at FROM rule_hit WHERE cluster_id = $1;", clusterName, + ) + + if err != nil { + return report, types.Timestamp(lastChecked.UTC().Format(time.RFC3339)), err + } + + report, err = parseRuleRows(rows) + + return report, types.Timestamp(lastChecked.UTC().Format(time.RFC3339)), err +} + +// GetRuleHitInsertStatement method prepares DB statement to be used to write +// rule FQDN + rule error key into rule_hit table for given cluster_id +func (storage OCPRecommendationsDBStorage) GetRuleHitInsertStatement(rules []types.ReportItem) string { + const ruleInsertStatement = "INSERT INTO rule_hit(org_id, cluster_id, rule_fqdn, error_key, template_data, created_at) VALUES %s" + + // pre-allocate array for placeholders + placeholders := make([]string, len(rules)) + + // fill-in placeholders for INSERT statement + for index := range rules { + placeholders[index] = fmt.Sprintf("($%d,$%d,$%d,$%d,$%d,$%d)", + index*6+1, + index*6+2, + index*6+3, + index*6+4, + index*6+5, + index*6+6, + ) + } + + // construct INSERT statement for multiple values + return fmt.Sprintf(ruleInsertStatement, strings.Join(placeholders, ",")) +} + +// valuesForRuleHitsInsert function prepares values to insert rules into +// rule_hit table. 
+func valuesForRuleHitsInsert( + orgID types.OrgID, + clusterName types.ClusterName, + rules []types.ReportItem, + ruleKeyCreatedAt map[string]types.Timestamp, +) []interface{} { + // fill-in values for INSERT statement + values := make([]interface{}, len(rules)*6) + + for index, rule := range rules { + ruleKey := string(rule.Module) + string(rule.ErrorKey) + var impactedSince types.Timestamp + if val, ok := ruleKeyCreatedAt[ruleKey]; ok { + impactedSince = val + } else { + impactedSince = types.Timestamp(time.Now().UTC().Format(time.RFC3339)) + } + values[6*index] = orgID + values[6*index+1] = clusterName + values[6*index+2] = rule.Module + values[6*index+3] = rule.ErrorKey + values[6*index+4] = string(rule.TemplateData) + values[6*index+5] = impactedSince + } + return values +} + +func (storage OCPRecommendationsDBStorage) updateReport( + tx *sql.Tx, + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + rules []types.ReportItem, + lastCheckedTime time.Time, + gatheredAt time.Time, + reportedAtTime time.Time, +) error { + // Get the UPSERT query for writing a report into the database. + reportUpsertQuery := storage.getReportUpsertQuery() + + // Get created_at if present before deletion + query := "SELECT rule_fqdn, error_key, created_at FROM rule_hit WHERE org_id = $1 AND cluster_id = $2;" + RuleKeyCreatedAt, err := storage.getRuleKeyCreatedAtMap( + query, orgID, clusterName, + ) + if err != nil { + log.Error().Err(err).Msgf("Unable to get recommendation impacted_since") + RuleKeyCreatedAt = make(map[string]types.Timestamp) // create empty map + } + + deleteQuery := "DELETE FROM rule_hit WHERE org_id = $1 AND cluster_id = $2;" + _, err = tx.Exec(deleteQuery, orgID, clusterName) + if err != nil { + log.Err(err).Msgf("Unable to remove previous cluster reports (org: %v, cluster: %v)", orgID, clusterName) + return err + } + + // Perform the report insert. 
+ // All older rule hits has been deleted for given cluster so it is + // possible to just insert new hits w/o the need to update on conflict + if len(rules) > 0 { + // Get the INSERT statement for writing a rule into the database. + ruleInsertStatement := storage.GetRuleHitInsertStatement(rules) + + // Get values to be stored in rule_hits table + values := valuesForRuleHitsInsert(orgID, clusterName, rules, RuleKeyCreatedAt) + + _, err = tx.Exec(ruleInsertStatement, values...) + if err != nil { + log.Err(err).Msgf("Unable to insert the cluster report rules (org: %v, cluster: %v)", + orgID, clusterName, + ) + return err + } + } + + if gatheredAt.IsZero() { + _, err = tx.Exec(reportUpsertQuery, orgID, clusterName, report, reportedAtTime, lastCheckedTime, 0, sql.NullTime{Valid: false}) + } else { + _, err = tx.Exec(reportUpsertQuery, orgID, clusterName, report, reportedAtTime, lastCheckedTime, 0, gatheredAt) + } + + if err != nil { + log.Err(err).Msgf("Unable to upsert the cluster report (org: %v, cluster: %v)", orgID, clusterName) + return err + } + + return nil +} + +func prepareInsertRecommendationsStatement( + orgID types.OrgID, + clusterName types.ClusterName, + report types.ReportRules, + createdAt types.Timestamp, + impactedSinceMap map[string]types.Timestamp, +) (selectors []string, statement string, statementArgs []interface{}) { + statement = `INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key, rule_id, created_at, impacted_since) VALUES %s` + + valuesIdx := make([]string, len(report.HitRules)) + statementIdx := 0 + selectors = make([]string, len(report.HitRules)) + + for idx, rule := range report.HitRules { + ruleFqdn := strings.TrimSuffix(string(rule.Module), ReportSuffix) + ruleID := ruleFqdn + "|" + string(rule.ErrorKey) + impactedSince, ok := impactedSinceMap[ruleFqdn+string(rule.ErrorKey)] + if !ok { + impactedSince = createdAt + } + selectors[idx] = ruleID + statementArgs = append(statementArgs, orgID, clusterName, ruleFqdn, 
rule.ErrorKey, ruleID, createdAt, impactedSince) + statementIdx = len(statementArgs) + const separatorAndParam = ", $" + valuesIdx[idx] = "($" + fmt.Sprint(statementIdx-6) + + separatorAndParam + fmt.Sprint(statementIdx-5) + + separatorAndParam + fmt.Sprint(statementIdx-4) + + separatorAndParam + fmt.Sprint(statementIdx-3) + + separatorAndParam + fmt.Sprint(statementIdx-2) + + separatorAndParam + fmt.Sprint(statementIdx-1) + + separatorAndParam + fmt.Sprint(statementIdx) + ")" + } + + statement = fmt.Sprintf(statement, strings.Join(valuesIdx, ",")) + return +} + +func (storage OCPRecommendationsDBStorage) insertRecommendations( + tx *sql.Tx, + orgID types.OrgID, + clusterName types.ClusterName, + report types.ReportRules, + createdAt types.Timestamp, + impactedSince map[string]types.Timestamp, +) (inserted int, err error) { + if len(report.HitRules) == 0 { + log.Info(). + Int(organizationKey, int(orgID)). + Str(clusterKey, string(clusterName)). + Int(issuesCountKey, 0). + Msg("No new recommendation to insert") + return 0, nil + } + + selectors, statement, args := prepareInsertRecommendationsStatement(orgID, clusterName, report, createdAt, impactedSince) + + if _, err = tx.Exec(statement, args...); err != nil { + log.Error(). + Int(organizationKey, int(orgID)). + Str(clusterKey, string(clusterName)). + Int(issuesCountKey, inserted). + Interface(createdAtKey, createdAt). + Strs(selectorsKey, selectors). + Err(err). + Msg("Unable to insert the recommendations") + return 0, err + } + log.Info(). + Int(organizationKey, int(orgID)). + Str(clusterKey, string(clusterName)). + Int(issuesCountKey, inserted). + Interface(createdAtKey, createdAt). + Strs(selectorsKey, selectors). 
+ Msg("Recommendations inserted successfully") + + inserted = len(selectors) + return +} + +// getRuleKeyCreatedAtMap returns a map between +// (rule_fqdn, error_key) -> created_at +// for each rule_hit rows matching given +// orgId and clusterName +func (storage OCPRecommendationsDBStorage) getRuleKeyCreatedAtMap( + query string, + orgID types.OrgID, + clusterName types.ClusterName, +) ( + map[string]types.Timestamp, + error) { + impactedSinceRows, err := storage.connection.Query( + query, orgID, clusterName) + if err != nil { + log.Error().Err(err).Msg("error retrieving recommendation timestamp") + return nil, err + } + defer closeRows(impactedSinceRows) + + RuleKeyCreatedAt := make(map[string]types.Timestamp) + for impactedSinceRows.Next() { + var ruleFqdn string + var errorKey string + var oldTime time.Time + err := impactedSinceRows.Scan( + &ruleFqdn, + &errorKey, + &oldTime, + ) + if err != nil { + log.Error().Err(err).Msg("error scanning for rule id -> created_at map") + continue + } + newTime := types.Timestamp(oldTime.UTC().Format(time.RFC3339)) + RuleKeyCreatedAt[ruleFqdn+errorKey] = newTime + } + return RuleKeyCreatedAt, err +} + +// WriteReportForCluster writes result (health status) for selected cluster for given organization +func (storage OCPRecommendationsDBStorage) WriteReportForCluster( + orgID types.OrgID, + clusterName types.ClusterName, + report types.ClusterReport, + rules []types.ReportItem, + lastCheckedTime time.Time, + gatheredAt time.Time, + storedAtTime time.Time, + _ types.RequestID, +) error { + // Skip writing the report if it isn't newer than a report + // that is already in the database for the same cluster. 
+ if oldLastChecked, exists := storage.clustersLastChecked[clusterName]; exists && !lastCheckedTime.After(oldLastChecked) { + return types.ErrOldReport + } + + if storage.dbDriverType != types.DBDriverPostgres { + return fmt.Errorf("writing report with DB %v is not supported", storage.dbDriverType) + } + + // Begin a new transaction. + tx, err := storage.connection.Begin() + if err != nil { + return err + } + + err = func(tx *sql.Tx) error { + // Check if there is a more recent report for the cluster already in the database. + rows, err := tx.Query( + "SELECT last_checked_at FROM report WHERE org_id = $1 AND cluster = $2 AND last_checked_at > $3;", + orgID, clusterName, lastCheckedTime) + err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) + if err != nil { + log.Error().Err(err).Msg("Unable to look up the most recent report in the database") + return err + } + + defer closeRows(rows) + + // If there is one, print a warning and discard the report (don't update it). + if rows.Next() { + log.Warn().Msgf("Database already contains report for organization %d and cluster name %s more recent than %v", + orgID, clusterName, lastCheckedTime) + return nil + } + + err = storage.updateReport(tx, orgID, clusterName, report, rules, lastCheckedTime, gatheredAt, storedAtTime) + if err != nil { + return err + } + + storage.clustersLastChecked[clusterName] = lastCheckedTime + metrics.WrittenReports.Inc() + + return nil + }(tx) + + finishTransaction(tx, err) + + return err +} + +// WriteRecommendationsForCluster writes hitting rules in received report for selected cluster +func (storage OCPRecommendationsDBStorage) WriteRecommendationsForCluster( + orgID types.OrgID, + clusterName types.ClusterName, + stringReport types.ClusterReport, + creationTime types.Timestamp, +) (err error) { + var report types.ReportRules + err = json.Unmarshal([]byte(stringReport), &report) + if err != nil { + return err + } + tx, err := storage.connection.Begin() + if err != nil { + return 
err + } + + impactedSinceMap := make(map[string]ctypes.Timestamp) + err = func(tx *sql.Tx) error { + var deleted int64 + // Delete current recommendations for the cluster if some report has been previously stored for this cluster + if _, ok := storage.clustersLastChecked[clusterName]; ok { + // Get impacted_since if present + query := "SELECT rule_fqdn, error_key, impacted_since FROM recommendation WHERE org_id = $1 AND cluster_id = $2;" + impactedSinceMap, err = storage.getRuleKeyCreatedAtMap( + query, orgID, clusterName) + if err != nil { + log.Error().Err(err).Msgf("Unable to get recommendation impacted_since") + } + log.Info().Any("impactedSinceMap", impactedSinceMap).Msg("Impacted since map without limit") + // it is needed to use `org_id = $1` condition there + // because it allows DB to use proper btree indexing + // and not slow sequential scan + result, err := tx.Exec( + "DELETE FROM recommendation WHERE org_id = $1 AND cluster_id = $2;", orgID, clusterName) + err = types.ConvertDBError(err, []interface{}{clusterName}) + if err != nil { + log.Error().Err(err).Msgf("Unable to delete the existing recommendations for %s", clusterName) + return err + } + + // As the documentation says: + // RowsAffected returns the number of rows affected by an + // update, insert, or delete. Not every database or database + // driver may support this. + // So we might run in a scenario where we don't have metrics + // if the driver doesn't help. + deleted, err = result.RowsAffected() + if err != nil { + log.Error().Err(err).Msg("Unable to retrieve number of deleted rows with current driver") + return err + } + } + + inserted, err := storage.insertRecommendations(tx, orgID, clusterName, report, creationTime, impactedSinceMap) + if err != nil { + return err + } + + if deleted != 0 || inserted != 0 { + log.Info(). + Int64("Deleted", deleted). + Int("Inserted", inserted). + Int(organizationKey, int(orgID)). + Str(clusterKey, string(clusterName)). 
+ Msg("Updated recommendation table") + } + // updateRecommendationsMetrics(string(clusterName), float64(deleted), float64(inserted)) + + return nil + }(tx) + + finishTransaction(tx, err) + + return err +} + +// finishTransaction finishes the transaction depending on err. err == nil -> commit, err != nil -> rollback +func finishTransaction(tx *sql.Tx, err error) { + if err != nil { + rollbackError := tx.Rollback() + if rollbackError != nil { + log.Err(rollbackError).Msgf("error when trying to rollback a transaction") + } + } else { + commitError := tx.Commit() + if commitError != nil { + log.Err(commitError).Msgf("error when trying to commit a transaction") + } + } +} + +// ReadRecommendationsForClusters reads all recommendations from recommendation table for given organization +func (storage OCPRecommendationsDBStorage) ReadRecommendationsForClusters( + clusterList []string, + orgID types.OrgID, +) (ctypes.RecommendationImpactedClusters, error) { + impactedClusters := make(ctypes.RecommendationImpactedClusters, 0) + + if len(clusterList) < 1 { + return impactedClusters, nil + } + + // #nosec G201 + whereClause := fmt.Sprintf(`WHERE org_id = $1 AND cluster_id IN (%v)`, inClauseFromSlice(clusterList)) + + // disable "G202 (CWE-89): SQL string concatenation" + // #nosec G202 + query := ` + SELECT + rule_id, cluster_id + FROM + recommendation + ` + whereClause + + // #nosec G202 + rows, err := storage.connection.Query(query, orgID) + if err != nil { + log.Error().Err(err).Msg("query to get recommendations") + return impactedClusters, err + } + + for rows.Next() { + var ( + ruleID types.RuleID + clusterID types.ClusterName + ) + + err := rows.Scan( + &ruleID, + &clusterID, + ) + if err != nil { + log.Error().Err(err).Msg("read one recommendation") + return impactedClusters, err + } + + impactedClusters[ruleID] = append(impactedClusters[ruleID], clusterID) + } + + return impactedClusters, nil +} + +// ReadClusterListRecommendations retrieves cluster IDs and a list of 
hitting rules for each one +func (storage OCPRecommendationsDBStorage) ReadClusterListRecommendations( + clusterList []string, + orgID types.OrgID, +) (ctypes.ClusterRecommendationMap, error) { + clusterMap := make(ctypes.ClusterRecommendationMap, 0) + + if len(clusterList) < 1 { + return clusterMap, nil + } + + // we have to select from report table primarily because we need to show last_checked_at even if there + // are no rule hits (which means there are no rows in recommendation table for that cluster) + + // disable "G202 (CWE-89): SQL string concatenation" + // #nosec G202 + query := ` + SELECT + rep.cluster, rep.last_checked_at, COALESCE(rec.rule_id, '') + FROM + report rep + LEFT JOIN + recommendation rec + ON + rep.org_id = rec.org_id AND + rep.cluster = rec.cluster_id + WHERE + rep.org_id = $1 AND rep.cluster IN (%v) + ` + // #nosec G201 + query = fmt.Sprintf(query, inClauseFromSlice(clusterList)) + + rows, err := storage.connection.Query(query, orgID) + if err != nil { + log.Error().Err(err).Msg("query to get recommendations") + return clusterMap, err + } + + for rows.Next() { + var ( + clusterID ctypes.ClusterName + ruleID ctypes.RuleID + timestamp time.Time + ) + + err := rows.Scan( + &clusterID, + ×tamp, + &ruleID, + ) + if err != nil { + log.Error().Err(err).Msg("problem reading one recommendation") + return clusterMap, err + } + + if cluster, exists := clusterMap[clusterID]; exists { + cluster.Recommendations = append(cluster.Recommendations, ruleID) + clusterMap[clusterID] = cluster + } else { + // create entry in map for new cluster ID + clusterMap[clusterID] = ctypes.ClusterRecommendationList{ + // created at is the same for all rows for each cluster + CreatedAt: timestamp, + Recommendations: []ctypes.RuleID{ruleID}, + } + } + } + + storage.fillInMetadata(orgID, clusterMap) + return clusterMap, nil +} + +// ReportsCount reads number of all records stored in database +func (storage OCPRecommendationsDBStorage) ReportsCount() (int, error) { + count 
:= -1 + err := storage.connection.QueryRow("SELECT count(*) FROM report;").Scan(&count) + err = types.ConvertDBError(err, nil) + + return count, err +} + +// DeleteReportsForOrg deletes all reports related to the specified organization from the storage. +func (storage OCPRecommendationsDBStorage) DeleteReportsForOrg(orgID types.OrgID) error { + _, err := storage.connection.Exec("DELETE FROM report WHERE org_id = $1;", orgID) + return err +} + +// DeleteReportsForCluster deletes all reports related to the specified cluster from the storage. +func (storage OCPRecommendationsDBStorage) DeleteReportsForCluster(clusterName types.ClusterName) error { + _, err := storage.connection.Exec("DELETE FROM report WHERE cluster = $1;", clusterName) + return err +} + +// GetConnection returns db connection(useful for testing) +func (storage OCPRecommendationsDBStorage) GetConnection() *sql.DB { + return storage.connection +} + +// WriteConsumerError writes a report about a consumer error into the storage. 
+func (storage OCPRecommendationsDBStorage) WriteConsumerError(msg *sarama.ConsumerMessage, consumerErr error) error { + _, err := storage.connection.Exec(` + INSERT INTO consumer_error (topic, partition, topic_offset, key, produced_at, consumed_at, message, error) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, + msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Timestamp, time.Now().UTC(), msg.Value, consumerErr.Error()) + + return err +} + +// GetDBDriverType returns db driver type +func (storage OCPRecommendationsDBStorage) GetDBDriverType() types.DBDriver { + return storage.dbDriverType +} + +// DoesClusterExist checks if cluster with this id exists +func (storage OCPRecommendationsDBStorage) DoesClusterExist(clusterID types.ClusterName) (bool, error) { + err := storage.connection.QueryRow( + "SELECT cluster FROM report WHERE cluster = $1", clusterID, + ).Scan(&clusterID) + if err == sql.ErrNoRows { + return false, nil + } else if err != nil { + return false, err + } + + return true, nil +} + +// ListOfDisabledClusters function returns list of all clusters disabled for a rule from a +// specified account. 
+func (storage OCPRecommendationsDBStorage) ListOfDisabledClusters( + orgID types.OrgID, + ruleID types.RuleID, + errorKey types.ErrorKey, +) ( + disabledClusters []ctypes.DisabledClusterInfo, + err error, +) { + // select disabled rules from toggle table and the latest feedback from disable_feedback table + // LEFT join and COALESCE are used for the feedback, because feedback is filled by different + // request than toggle, so it might be empty/null + query := ` + SELECT + toggle.cluster_id, + toggle.disabled_at, + COALESCE(feedback.message, '') + FROM + cluster_rule_toggle toggle + LEFT JOIN + cluster_user_rule_disable_feedback feedback + ON feedback.updated_at = ( + SELECT updated_at + FROM cluster_user_rule_disable_feedback + WHERE cluster_id = toggle.cluster_id + AND org_id = $1 + AND rule_id = $2 + AND error_key = $3 + ORDER BY updated_at DESC + LIMIT 1 + ) + WHERE + toggle.org_id = $1 + AND toggle.rule_id = $2 + AND toggle.error_key = $3 + AND toggle.disabled = $4 + ORDER BY + toggle.disabled_at DESC + ` + + // run the query against database + rows, err := storage.connection.Query(query, orgID, ruleID, errorKey, RuleToggleDisable) + + // return empty list in case of any error + if err != nil { + return disabledClusters, err + } + defer closeRows(rows) + + for rows.Next() { + var disabledCluster ctypes.DisabledClusterInfo + + err = rows.Scan( + &disabledCluster.ClusterID, + &disabledCluster.DisabledAt, + &disabledCluster.Justification, + ) + + if err != nil { + log.Error().Err(err).Msg("ReadListOfDisabledRules") + // return partially filled slice + error + return disabledClusters, err + } + + // append disabled cluster read from database to a slice + disabledClusters = append(disabledClusters, disabledCluster) + } + + return disabledClusters, nil +} diff --git a/storage/storage_test.go b/storage/ocp_recommendations_storage_test.go similarity index 89% rename from storage/storage_test.go rename to storage/ocp_recommendations_storage_test.go index 
bf39798f5..4866e19d1 100644 --- a/storage/storage_test.go +++ b/storage/ocp_recommendations_storage_test.go @@ -21,7 +21,6 @@ import ( "database/sql" "database/sql/driver" "fmt" - "os" "strings" "testing" "time" @@ -50,7 +49,7 @@ func init() { zerolog.SetGlobalLevel(zerolog.WarnLevel) } -func assertNumberOfReports(t *testing.T, mockStorage storage.Storage, expectedNumberOfReports int) { +func assertNumberOfReports(t *testing.T, mockStorage storage.OCPRecommendationsStorage, expectedNumberOfReports int) { numberOfReports, err := mockStorage.ReportsCount() helpers.FailOnError(t, err) assert.Equal(t, expectedNumberOfReports, numberOfReports) @@ -58,7 +57,7 @@ func assertNumberOfReports(t *testing.T, mockStorage storage.Storage, expectedNu func checkReportForCluster( t *testing.T, - s storage.Storage, + s storage.OCPRecommendationsStorage, orgID types.OrgID, clusterName types.ClusterName, expected []types.RuleOnReport, @@ -73,7 +72,7 @@ func checkReportForCluster( func writeReportForCluster( t *testing.T, - storageImpl storage.Storage, + storageImpl storage.OCPRecommendationsStorage, orgID types.OrgID, clusterName types.ClusterName, clusterReport types.ClusterReport, @@ -86,7 +85,7 @@ func writeReportForCluster( // TestNewStorageError checks whether constructor for new storage returns error for improper storage configuration func TestNewStorageError(t *testing.T) { - _, err := storage.New(storage.Configuration{ + _, err := storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "non existing driver", Type: "sql", }) @@ -95,7 +94,7 @@ func TestNewStorageError(t *testing.T) { // TestNewStorageNoType checks whether constructor for new storage returns error for improper storage configuration func TestNewStorageNoType(t *testing.T) { - _, err := storage.New(storage.Configuration{ + _, err := storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "non existing driver", }) assert.EqualError(t, err, "Unknown storage type ''") @@ -103,7 +102,7 @@ func 
TestNewStorageNoType(t *testing.T) { // TestNewStorageWrongType checks whether constructor for new storage returns error for improper storage configuration func TestNewStorageWrongType(t *testing.T) { - _, err := storage.New(storage.Configuration{ + _, err := storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "non existing driver", Type: "foobar", }) @@ -112,7 +111,7 @@ func TestNewStorageWrongType(t *testing.T) { // TestNewStorageWithLogging tests creating new storage with logs func TestNewStorageWithLoggingError(t *testing.T) { - s, _ := storage.New(storage.Configuration{ + s, _ := storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "postgres", PGPort: 1234, PGUsername: "user", @@ -126,16 +125,16 @@ func TestNewStorageWithLoggingError(t *testing.T) { // TestNewStorageReturnedImplementation check what implementation of storage is returnd func TestNewStorageReturnedImplementation(t *testing.T) { - s, _ := storage.New(storage.Configuration{ + s, _ := storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "postgres", PGPort: 1234, PGUsername: "user", LogSQLQueries: true, Type: "sql", }) - assert.IsType(t, &storage.DBStorage{}, s) + assert.IsType(t, &storage.OCPRecommendationsDBStorage{}, s) - s, _ = storage.New(storage.Configuration{ + s, _ = storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "postgres", PGPort: 1234, PGUsername: "user", @@ -144,19 +143,19 @@ func TestNewStorageReturnedImplementation(t *testing.T) { }) assert.IsType(t, &storage.RedisStorage{}, s) - s, _ = storage.New(storage.Configuration{ + s, _ = storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "postgres", PGPort: 1234, PGUsername: "user", LogSQLQueries: true, Type: "noop", }) - assert.IsType(t, &storage.NoopStorage{}, s) + assert.IsType(t, &storage.NoopOCPStorage{}, s) } // TestDBStorageReadReportForClusterEmptyTable check the behaviour of method ReadReportForCluster func TestDBStorageReadReportForClusterEmptyTable(t 
*testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() _, _, _, _, err := mockStorage.ReadReportForCluster(testdata.OrgID, testdata.ClusterName) @@ -176,7 +175,7 @@ func TestDBStorageReadReportForClusterEmptyTable(t *testing.T) { // TestDBStorageReadReportForClusterClosedStorage check the behaviour of method ReadReportForCluster func TestDBStorageReadReportForClusterClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // we need to close storage right now closer() @@ -186,7 +185,7 @@ func TestDBStorageReadReportForClusterClosedStorage(t *testing.T) { // TestDBStorageReadReportForCluster check the behaviour of method ReadReportForCluster func TestDBStorageReadReportForCluster(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -195,7 +194,7 @@ func TestDBStorageReadReportForCluster(t *testing.T) { // TestDBStorageGetOrgIDByClusterID check the behaviour of method GetOrgIDByClusterID func TestDBStorageGetOrgIDByClusterID(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -206,10 +205,10 @@ func TestDBStorageGetOrgIDByClusterID(t *testing.T) { } func TestDBStorageGetOrgIDByClusterID_Error(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() - dbStorage := 
mockStorage.(*storage.DBStorage) + dbStorage := mockStorage.(*storage.OCPRecommendationsDBStorage) connection := dbStorage.GetConnection() query := ` @@ -246,7 +245,7 @@ func TestDBStorageGetOrgIDByClusterID_Error(t *testing.T) { // TestDBStorageGetOrgIDByClusterIDFailing check the behaviour of method GetOrgIDByClusterID for not existed ClusterID func TestDBStorageGetOrgIDByClusterIDFailing(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() orgID, err := mockStorage.GetOrgIDByClusterID(testdata.ClusterName) @@ -257,7 +256,7 @@ func TestDBStorageGetOrgIDByClusterIDFailing(t *testing.T) { // TestDBStorageReadReportNoTable check the behaviour of method ReadReportForCluster // when the table with results does not exist func TestDBStorageReadReportNoTable(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() _, _, _, _, err := mockStorage.ReadReportForCluster(testdata.OrgID, testdata.ClusterName) @@ -266,7 +265,7 @@ func TestDBStorageReadReportNoTable(t *testing.T) { // TestDBStorageWriteReportForClusterClosedStorage check the behaviour of method WriteReportForCluster func TestDBStorageWriteReportForClusterClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // we need to close storage right now closer() @@ -285,7 +284,7 @@ func TestDBStorageWriteReportForClusterClosedStorage(t *testing.T) { // TestDBStorageWriteReportForClusterClosedStorage check the behaviour of method WriteReportForCluster func TestDBStorageWriteReportForClusterUnsupportedDriverError(t *testing.T) { - fakeStorage := storage.NewFromConnection(nil, -1) + fakeStorage := storage.NewOCPRecommendationsFromConnection(nil, -1) // no need to close it err := 
fakeStorage.WriteReportForCluster( @@ -304,7 +303,7 @@ func TestDBStorageWriteReportForClusterUnsupportedDriverError(t *testing.T) { // TestDBStorageWriteReportForClusterMoreRecentInDB checks that older report // will not replace a more recent one when writing a report to storage. func TestDBStorageWriteReportForClusterMoreRecentInDB(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() newerTime := time.Now().UTC() @@ -339,7 +338,7 @@ func TestDBStorageWriteReportForClusterMoreRecentInDB(t *testing.T) { // TestDBStorageClusterOrgTransfer checks the behaviour of report storage in case of cluster org transfer func TestDBStorageClusterOrgTransfer(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() cluster1ID, cluster2ID := types.ClusterName("aaaaaaaa-1234-cccc-dddd-eeeeeeeeeeee"), @@ -391,16 +390,12 @@ func TestDBStorageClusterOrgTransfer(t *testing.T) { // TestDBStorageWriteReportForClusterDroppedReportTable checks the error // returned when trying to SELECT from a dropped/missing report table. 
func TestDBStorageWriteReportForClusterDroppedReportTable(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() - connection := storage.GetConnection(mockStorage.(*storage.DBStorage)) + connection := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) - query := "DROP TABLE report" - if os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB") == "postgres" { - query += " CASCADE" - } - query += ";" + query := "DROP TABLE report CASCADE;" _, err := connection.Exec(query) helpers.FailOnError(t, err) @@ -414,7 +409,7 @@ func TestDBStorageWriteReportForClusterDroppedReportTable(t *testing.T) { } func TestDBStorageWriteReportForClusterExecError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() createReportTableWithBadClusterField(t, mockStorage) @@ -426,10 +421,9 @@ func TestDBStorageWriteReportForClusterExecError(t *testing.T) { ) assert.Error(t, err) - const sqliteErrMessage = "CHECK constraint failed: report" - const postgresErrMessage = "pq: invalid input syntax for integer" - if err.Error() != sqliteErrMessage && !strings.HasPrefix(err.Error(), postgresErrMessage) { - t.Fatalf("expected on of: \n%v\n%v\ngot:\n%v", sqliteErrMessage, postgresErrMessage, err.Error()) + const postgresErrMessage = "pq: invalid input syntax for type integer" + if !strings.HasPrefix(err.Error(), postgresErrMessage) { + t.Fatalf("expected: \n%v\ngot:\n%v", postgresErrMessage, err.Error()) } } @@ -470,7 +464,7 @@ func TestDBStorageWriteReportForClusterFakePostgresOK(t *testing.T) { // TestDBStorageListOfOrgs check the behaviour of method ListOfOrgs func TestDBStorageListOfOrgs(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() 
writeReportForCluster(t, mockStorage, 1, "1deb586c-fb85-4db4-ae5b-139cdbdf77ae", testdata.ClusterReportEmpty, testdata.ReportEmptyRulesParsed) @@ -483,7 +477,7 @@ func TestDBStorageListOfOrgs(t *testing.T) { } func TestDBStorageListOfOrgsNoTable(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() _, err := mockStorage.ListOfOrgs() @@ -492,7 +486,7 @@ func TestDBStorageListOfOrgsNoTable(t *testing.T) { // TestDBStorageListOfOrgsClosedStorage check the behaviour of method ListOfOrgs func TestDBStorageListOfOrgsClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // we need to close storage right now closer() @@ -502,7 +496,7 @@ func TestDBStorageListOfOrgsClosedStorage(t *testing.T) { // TestDBStorageListOfClustersFor check the behaviour of method ListOfClustersForOrg func TestDBStorageListOfClustersForOrg(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() cluster1ID, cluster2ID, cluster3ID := testdata.GetRandomClusterID(), testdata.GetRandomClusterID(), testdata.GetRandomClusterID() @@ -528,7 +522,7 @@ func TestDBStorageListOfClustersForOrg(t *testing.T) { } func TestDBStorageListOfClustersTimeLimit(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // writeReportForCluster writes the report at time.Now() @@ -558,7 +552,7 @@ func TestDBStorageListOfClustersTimeLimit(t *testing.T) { } func TestDBStorageListOfClustersNoTable(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() _, err := 
mockStorage.ListOfClustersForOrg(5, time.Now().Add(-time.Hour)) @@ -567,7 +561,7 @@ func TestDBStorageListOfClustersNoTable(t *testing.T) { // TestDBStorageListOfClustersClosedStorage check the behaviour of method ListOfOrgs func TestDBStorageListOfClustersClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // we need to close storage right now closer() @@ -577,7 +571,7 @@ func TestDBStorageListOfClustersClosedStorage(t *testing.T) { // TestMockDBReportsCount check the behaviour of method ReportsCount func TestMockDBReportsCount(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() assertNumberOfReports(t, mockStorage, 0) @@ -588,7 +582,7 @@ func TestMockDBReportsCount(t *testing.T) { } func TestMockDBReportsCountNoTable(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() _, err := mockStorage.ReportsCount() @@ -596,7 +590,7 @@ func TestMockDBReportsCountNoTable(t *testing.T) { } func TestMockDBReportsCountClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) // we need to close storage right now closer() @@ -605,7 +599,7 @@ func TestMockDBReportsCountClosedStorage(t *testing.T) { } func TestDBStorageNewPostgresqlError(t *testing.T) { - s, _ := storage.New(storage.Configuration{ + s, _ := storage.NewOCPRecommendationsStorage(storage.Configuration{ Driver: "postgres", PGHost: "non-existing-host", PGPort: 12345, @@ -614,7 +608,7 @@ func TestDBStorageNewPostgresqlError(t *testing.T) { }) err := s.Init() - assert.Contains(t, err.Error(), "no such host") + assert.Contains(t, err.Error(), "non-existing-host") } func 
mustWriteReport( @@ -649,10 +643,10 @@ func TestDBStorageListOfOrgsLogError(t *testing.T) { buf := new(bytes.Buffer) log.Logger = zerolog.New(buf) - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() - connection := storage.GetConnection(mockStorage.(*storage.DBStorage)) + connection := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) // write illegal negative org_id mustWriteReport(t, connection, -1, testdata.ClusterName, testdata.ClusterReportEmpty) @@ -697,7 +691,7 @@ func TestDBStorageDeleteReports(t *testing.T) { "DeleteReportsForOrg", "DeleteReportsForCluster", } { func() { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() assertNumberOfReports(t, mockStorage, 0) @@ -731,7 +725,7 @@ func TestDBStorageDeleteReports(t *testing.T) { } func TestDBStorage_ReadReportForClusterByClusterName_OK(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -746,7 +740,7 @@ func TestDBStorage_ReadReportForClusterByClusterName_OK(t *testing.T) { } func TestDBStorage_CheckIfClusterExists_ClusterDoesNotExist(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() _, _, err := mockStorage.ReadReportForClusterByClusterName(testdata.ClusterName) @@ -758,24 +752,15 @@ func TestDBStorage_CheckIfClusterExists_ClusterDoesNotExist(t *testing.T) { } func TestDBStorage_CheckIfClusterExists_DBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() _, _, err := 
mockStorage.ReadReportForClusterByClusterName(testdata.ClusterName) assert.EqualError(t, err, "sql: database is closed") } -func TestDBStorage_NewSQLite(t *testing.T) { - _, err := storage.New(storage.Configuration{ - Driver: "sqlite3", - SQLiteDataSource: ":memory:", - Type: "sql", - }) - helpers.FailOnError(t, err) -} - func TestDBStorageWriteConsumerError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() testTopic := "topic" @@ -797,7 +782,7 @@ func TestDBStorageWriteConsumerError(t *testing.T) { assert.NoError(t, err) - conn := storage.GetConnection(mockStorage.(*storage.DBStorage)) + conn := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) row := conn.QueryRow(` SELECT key, message, produced_at, consumed_at, error FROM consumer_error @@ -820,10 +805,10 @@ func TestDBStorageWriteConsumerError(t *testing.T) { } func TestDBStorage_Init(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() - dbStorage := mockStorage.(*storage.DBStorage) + dbStorage := mockStorage.(*storage.OCPRecommendationsDBStorage) err := dbStorage.MigrateToLatest() helpers.FailOnError(t, err) @@ -840,12 +825,12 @@ func TestDBStorage_Init(t *testing.T) { } func TestDBStorage_Init_Error(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() createReportTableWithBadClusterField(t, mockStorage) - connection := storage.GetConnection(mockStorage.(*storage.DBStorage)) + connection := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) // create a table with a bad type _, err := connection.Exec(` @@ -863,13 +848,12 @@ func TestDBStorage_Init_Error(t *testing.T) { ) } -func createReportTableWithBadClusterField(t *testing.T, 
mockStorage storage.Storage) { - connection := storage.GetConnection(mockStorage.(*storage.DBStorage)) - +func createReportTableWithBadClusterField(t *testing.T, mockStorage storage.OCPRecommendationsStorage) { + connection := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) query := ` CREATE TABLE report ( org_id INTEGER NOT NULL, - cluster INTEGER NOT NULL UNIQUE CHECK(typeof(cluster) = 'integer'), + cluster INTEGER NOT NULL UNIQUE, report VARCHAR NOT NULL, reported_at TIMESTAMP, last_checked_at TIMESTAMP, @@ -878,20 +862,6 @@ func createReportTableWithBadClusterField(t *testing.T, mockStorage storage.Stor ) ` - if os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB") == "postgres" { - query = ` - CREATE TABLE report ( - org_id INTEGER NOT NULL, - cluster INTEGER NOT NULL UNIQUE, - report VARCHAR NOT NULL, - reported_at TIMESTAMP, - last_checked_at TIMESTAMP, - kafka_offset BIGINT NOT NULL DEFAULT 0, - PRIMARY KEY(org_id, cluster) - ) - ` - } - // create a table with a bad type _, err := connection.Exec(query) helpers.FailOnError(t, err) @@ -951,7 +921,7 @@ func TestArgsWithClusterNames(t *testing.T) { // TestDBStorageReadReportsForClusters1 check the behaviour of method // ReadReportForClusters func TestDBStorageReadReportsForClusters1(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -968,7 +938,7 @@ func TestDBStorageReadReportsForClusters1(t *testing.T) { // TestDBStorageReadReportsForClusters2 check the behaviour of method // ReadReportForClusters func TestDBStorageReadReportsForClusters2(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, 
testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -985,7 +955,7 @@ func TestDBStorageReadReportsForClusters2(t *testing.T) { // TestDBStorageReadReportsForClusters3 check the behaviour of method // ReadReportForClusters func TestDBStorageReadReportsForClusters3(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -1003,7 +973,7 @@ func TestDBStorageReadReportsForClusters3(t *testing.T) { // TestDBStorageReadOrgIDsForClusters0_Reproducer reproduces a bug caused by improper in clause handling func TestDBStorageReadOrgIDsForClusters0_Reproducer(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -1022,7 +992,7 @@ func TestDBStorageReadOrgIDsForClusters0_Reproducer(t *testing.T) { // TestDBStorageReadOrgIDsForClusters1 check the behaviour of method // ReadOrgIDsForClusters func TestDBStorageReadOrgIDsForClusters1(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -1039,7 +1009,7 @@ func TestDBStorageReadOrgIDsForClusters1(t *testing.T) { // TestDBStorageReadOrgIDsForClusters2 check the behaviour of method // ReadOrgIDsForClusters func TestDBStorageReadOrgIDsForClusters2(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) 
defer closer() writeReportForCluster(t, mockStorage, testdata.OrgID, testdata.ClusterName, `{"report":{}}`, testdata.ReportEmptyRulesParsed) @@ -1056,7 +1026,7 @@ func TestDBStorageReadOrgIDsForClusters2(t *testing.T) { // TestDBStorageWriteRecommendationsForClusterClosedStorage check the behaviour of method WriteRecommendationsForCluster func TestDBStorageWriteRecommendationsForClusterClosedStorage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) // we need to close storage right now closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1092,7 +1062,7 @@ func TestDBStorageWriteRecommendationForClusterNoConflict(t *testing.T) { // TestDBStorageInsertRecommendations checks that only hitting rules in the report // are inserted in the recommendation table func TestDBStorageInsertRecommendations(t *testing.T) { - mockStorage, expects := ira_helpers.MustGetMockStorageWithExpectsForDriver(t, types.DBDriverSQLite3) + mockStorage, expects := ira_helpers.MustGetMockStorageWithExpectsForDriver(t, types.DBDriverPostgres) defer ira_helpers.MustCloseMockStorageWithExpects(t, mockStorage, expects) expects.ExpectBegin() @@ -1115,7 +1085,7 @@ func TestDBStorageInsertRecommendations(t *testing.T) { // and created_at match impactedSince := RecommendationImpactedSinceMap inserted, err := storage.InsertRecommendations( - mockStorage.(*storage.DBStorage), + mockStorage.(*storage.OCPRecommendationsDBStorage), testdata.OrgID, testdata.ClusterName, report, RecommendationCreatedAtTimestamp, impactedSince) assert.Equal(t, 3, inserted) @@ -1173,7 +1143,7 @@ func TestDBStorageWriteRecommendationForClusterAlreadyStoredAndDeleted(t *testin helpers.FailOnError(t, err) // Need to update clustersLastChecked as would be done on init and in writeReportForClusters - dbStorage := mockStorage.(*storage.DBStorage) + dbStorage := mockStorage.(*storage.OCPRecommendationsDBStorage) 
storage.SetClustersLastChecked(dbStorage, testdata.ClusterName, time.Now()) expects.ExpectBegin() @@ -1198,7 +1168,7 @@ func TestDBStorageWriteRecommendationForClusterAlreadyStoredAndDeleted(t *testin // TestDBStorageInsertRecommendationsNoRuleHit checks that no // recommendations are inserted if there is no rule hits in the report func TestDBStorageInsertRecommendationsNoRuleHit(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() report := types.ReportRules{ @@ -1209,7 +1179,7 @@ func TestDBStorageInsertRecommendationsNoRuleHit(t *testing.T) { // impactedSincefirst time a recommendation is inserted impacted and created_at match impactedSince := RecommendationImpactedSinceMap inserted, err := storage.InsertRecommendations( - mockStorage.(*storage.DBStorage), testdata.OrgID, testdata.ClusterName, + mockStorage.(*storage.OCPRecommendationsDBStorage), testdata.OrgID, testdata.ClusterName, report, RecommendationCreatedAtTimestamp, impactedSince) assert.Equal(t, 0, inserted) @@ -1219,7 +1189,7 @@ func TestDBStorageInsertRecommendationsNoRuleHit(t *testing.T) { // TestDBStorageReadRecommendationsForClusters checks that stored recommendations // are retrieved correctly func TestDBStorageReadRecommendationsForClusters(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1243,7 +1213,7 @@ func TestDBStorageReadRecommendationsForClusters(t *testing.T) { // TestDBStorageReadRecommendationsForClustersMoreClusters checks that stored recommendations // for multiplpe clusters is calculated correctly func TestDBStorageReadRecommendationsForClustersMoreClusters(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, 
true) defer closer() clusterList := make([]string, 3) @@ -1285,7 +1255,7 @@ func TestDBStorageReadRecommendationsForClustersMoreClusters(t *testing.T) { // TestDBStorageReadRecommendationsForClustersNoRecommendations checks that when no recommendations // are stored, it is an OK state func TestDBStorageReadRecommendationsForClustersNoRecommendations(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1303,7 +1273,7 @@ func TestDBStorageReadRecommendationsForClustersNoRecommendations(t *testing.T) // TestDBStorageReadRecommendationsForClustersEmptyList_Reproducer reproduces a bug caused by improper in clause handling func TestDBStorageReadRecommendationsForClustersEmptyList_Reproducer(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1322,7 +1292,7 @@ func TestDBStorageReadRecommendationsForClustersEmptyList_Reproducer(t *testing. 
// TestDBStorageReadRecommendationsGetSelectedClusters loads several recommendations for the same org // but "simulates" a situation where we only get a subset of them from the AMS API func TestDBStorageReadRecommendationsGetSelectedClusters(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := make([]string, 3) @@ -1354,7 +1324,7 @@ func TestDBStorageReadRecommendationsGetSelectedClusters(t *testing.T) { // TestDBStorageReadRecommendationsForNonexistingClusters simulates getting a list of clusters where // we have none in the DB func TestDBStorageReadRecommendationsForNonexistingClusters(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1375,7 +1345,7 @@ func TestDBStorageReadRecommendationsForNonexistingClusters(t *testing.T) { // TestDBStorageReadClusterListRecommendationsNoRecommendations checks that when no recommendations // are stored, it is an OK state func TestDBStorageReadClusterListRecommendationsNoRecommendations(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteReportForCluster( @@ -1399,7 +1369,7 @@ func TestDBStorageReadClusterListRecommendationsNoRecommendations(t *testing.T) // TestDBStorageReadClusterListRecommendationsDifferentCluster checks that when no recommendations // are stored, it is an OK state func TestDBStorageReadClusterListRecommendationsDifferentCluster(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.WriteRecommendationsForCluster( @@ -1422,7 +1392,7 @@ func 
TestDBStorageReadClusterListRecommendationsDifferentCluster(t *testing.T) { // TestDBStorageReadClusterListRecommendationsGet1Cluster loads several recommendations for the same org // but "simulates" a situation where we only get one of them from the AMS API func TestDBStorageReadClusterListRecommendationsGet1Cluster(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := make([]string, 3) @@ -1464,7 +1434,7 @@ func TestDBStorageReadClusterListRecommendationsGet1Cluster(t *testing.T) { // TestDBStorageReadClusterListRecommendationsGet1Cluster loads several recommendations for the same org // but "simulates" a situation where we only get one of them from the AMS API func TestDBStorageReadClusterListRecommendationsGetMoreClusters(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusterList := make([]string, 3) @@ -1510,7 +1480,7 @@ func TestDBStorageReadClusterListRecommendationsGetMoreClusters(t *testing.T) { // using the "zero" time (if the report doesn't include a gathering time, its value will be // this one) func TestDBStorageWriteReportForClusterWithZeroGatheredTime(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() zeroTime := time.Time{} @@ -1529,7 +1499,7 @@ func TestDBStorageWriteReportForClusterWithZeroGatheredTime(t *testing.T) { } func TestDoesClusterExist(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() exist, err := mockStorage.DoesClusterExist(testdata.GetRandomClusterID()) diff --git a/storage/queries.go b/storage/queries.go index 24f6cd0c6..38c8b8b46 100644 --- a/storage/queries.go +++ 
b/storage/queries.go @@ -14,16 +14,7 @@ package storage -import "github.com/RedHatInsights/insights-results-aggregator/types" - -func (storage DBStorage) getReportUpsertQuery() string { - if storage.dbDriverType == types.DBDriverSQLite3 { - return ` - INSERT OR REPLACE INTO report(org_id, cluster, report, reported_at, last_checked_at, kafka_offset, gathered_at) - VALUES ($1, $2, $3, $4, $5, $6, $7) - ` - } - +func (storage OCPRecommendationsDBStorage) getReportUpsertQuery() string { return ` INSERT INTO report(org_id, cluster, report, reported_at, last_checked_at, kafka_offset, gathered_at) VALUES ($1, $2, $3, $4, $5, $6, $7) @@ -32,14 +23,7 @@ func (storage DBStorage) getReportUpsertQuery() string { ` } -func (storage DBStorage) getReportInfoUpsertQuery() string { - if storage.dbDriverType == types.DBDriverSQLite3 { - return ` - INSERT OR REPLACE INTO report_info(org_id, cluster_id, version_info) - VALUES ($1, $2, $3) - ` - } - +func (storage OCPRecommendationsDBStorage) getReportInfoUpsertQuery() string { return ` INSERT INTO report_info(org_id, cluster_id, version_info) VALUES ($1, $2, $3) @@ -47,3 +31,10 @@ func (storage DBStorage) getReportInfoUpsertQuery() string { DO UPDATE SET org_id = $1, version_info = $3 ` } + +func (storage DVORecommendationsDBStorage) getReportInsertQuery() string { + return ` + INSERT INTO dvo.dvo_report(org_id, cluster_id, namespace_id, namespace_name, report, recommendations, objects, reported_at, last_checked_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ` +} diff --git a/storage/rating.go b/storage/rating.go index edf225ea9..f70aa34aa 100644 --- a/storage/rating.go +++ b/storage/rating.go @@ -26,7 +26,7 @@ import ( ) // RateOnRule function stores the vote (rating) from a given user to a rule+error key -func (storage *DBStorage) RateOnRule( +func (storage *OCPRecommendationsDBStorage) RateOnRule( orgID types.OrgID, ruleFqdn types.RuleID, errorKey types.ErrorKey, @@ -67,7 +67,7 @@ func (storage *DBStorage) RateOnRule( } // 
GetRuleRating retrieves rating for given rule and user -func (storage *DBStorage) GetRuleRating( +func (storage *OCPRecommendationsDBStorage) GetRuleRating( orgID types.OrgID, ruleSelector types.RuleSelector, ) ( diff --git a/storage/rating_test.go b/storage/rating_test.go index 6b7b9f047..ee576ab73 100644 --- a/storage/rating_test.go +++ b/storage/rating_test.go @@ -26,7 +26,7 @@ import ( ) func TestDBStorage_RateOnRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -38,7 +38,7 @@ func TestDBStorage_RateOnRule(t *testing.T) { } func TestDBStorage_GetRuleRating(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -56,7 +56,7 @@ func TestDBStorage_GetRuleRating(t *testing.T) { } func TestDBStorage_GetRuleRating_NotFound(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) diff --git a/storage/redis_storage.go b/storage/redis_storage.go index be8de05af..1072b8f57 100644 --- a/storage/redis_storage.go +++ b/storage/redis_storage.go @@ -21,7 +21,6 @@ import ( "strings" "time" - "github.com/RedHatInsights/insights-content-service/content" "github.com/Shopify/sarama" "github.com/rs/zerolog/log" @@ -30,6 +29,7 @@ import ( ctypes "github.com/RedHatInsights/insights-results-types" "github.com/RedHatInsights/insights-results-aggregator/metrics" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -71,6 +71,16 @@ func (storage *RedisStorage) Close() error { return nil } +// GetMigrations noop +func (storage *RedisStorage) GetMigrations() 
[]migration.Migration { + return nil +} + +// GetMaxVersion noop +func (storage *RedisStorage) GetMaxVersion() migration.Version { + return migration.Version(0) +} + // ListOfOrgs noop func (*RedisStorage) ListOfOrgs() ([]types.OrgID, error) { return nil, nil @@ -232,11 +242,6 @@ func (*RedisStorage) DeleteReportsForCluster(types.ClusterName) error { return nil } -// LoadRuleContent noop -func (*RedisStorage) LoadRuleContent(content.RuleContentDirectory) error { - return nil -} - // GetRuleByID noop func (*RedisStorage) GetRuleByID(types.RuleID) (*types.Rule, error) { return nil, nil @@ -469,6 +474,11 @@ func (*RedisStorage) GetConnection() *sql.DB { return nil } +// GetDBSchema returns db schema (unused for Redis) +func (*RedisStorage) GetDBSchema() migration.Schema { + return migration.Schema("") +} + // PrintRuleDisableDebugInfo is a temporary helper function used to print form // cluster rule toggle related tables func (*RedisStorage) PrintRuleDisableDebugInfo() { diff --git a/storage/redis_storage_test.go b/storage/redis_storage_test.go index 30c4b8827..d0e0f4eb7 100644 --- a/storage/redis_storage_test.go +++ b/storage/redis_storage_test.go @@ -25,7 +25,6 @@ import ( "github.com/go-redis/redismock/v9" "github.com/stretchr/testify/assert" - "github.com/RedHatInsights/insights-content-service/content" "github.com/RedHatInsights/insights-operator-utils/redis" ctypes "github.com/RedHatInsights/insights-results-types" @@ -404,7 +403,6 @@ func TestRedisStorageEmptyMethods2(_ *testing.T) { _ = RedisStorage.AddFeedbackOnRuleDisable(clusterName, ruleID, errorKey, orgID, userID, "") _, _ = RedisStorage.GetUserFeedbackOnRuleDisable(clusterName, ruleID, errorKey, userID) _, _ = RedisStorage.GetUserFeedbackOnRule(clusterName, ruleID, errorKey, userID) - _ = RedisStorage.LoadRuleContent(content.RuleContentDirectory{}) _, _ = RedisStorage.GetRuleByID(ruleID) _, _ = RedisStorage.GetOrgIDByClusterID(clusterName) _, _ = RedisStorage.ListOfSystemWideDisabledRules(orgID) diff 
--git a/storage/rule_disable.go b/storage/rule_disable.go index eda45c600..237a4ff29 100644 --- a/storage/rule_disable.go +++ b/storage/rule_disable.go @@ -26,7 +26,7 @@ import ( // DisableRuleSystemWide disables the selected rule for all clusters visible to // given user -func (storage DBStorage) DisableRuleSystemWide( +func (storage OCPRecommendationsDBStorage) DisableRuleSystemWide( orgID types.OrgID, ruleID types.RuleID, errorKey types.ErrorKey, @@ -67,12 +67,12 @@ func (storage DBStorage) DisableRuleSystemWide( // EnableRuleSystemWide enables the selected rule for all clusters visible to // given user -func (storage DBStorage) EnableRuleSystemWide( +func (storage OCPRecommendationsDBStorage) EnableRuleSystemWide( orgID types.OrgID, ruleID types.RuleID, errorKey types.ErrorKey, ) error { - log.Info().Int("org_id", int(orgID)).Msgf("re-enabling rule %v|%v", ruleID, errorKey) + log.Info().Int(orgIDStr, int(orgID)).Msgf("re-enabling rule %v|%v", ruleID, errorKey) const query = `DELETE FROM rule_disable WHERE org_id = $1 @@ -98,7 +98,7 @@ func (storage DBStorage) EnableRuleSystemWide( } // UpdateDisabledRuleJustification change justification for already disabled rule -func (storage DBStorage) UpdateDisabledRuleJustification( +func (storage OCPRecommendationsDBStorage) UpdateDisabledRuleJustification( orgID types.OrgID, ruleID types.RuleID, errorKey types.ErrorKey, @@ -133,7 +133,7 @@ func (storage DBStorage) UpdateDisabledRuleJustification( } // ReadDisabledRule function returns disabled rule (if disabled) from database -func (storage DBStorage) ReadDisabledRule( +func (storage OCPRecommendationsDBStorage) ReadDisabledRule( orgID types.OrgID, ruleID types.RuleID, errorKey types.ErrorKey, ) (ctypes.SystemWideRuleDisable, bool, error) { var disabledRule ctypes.SystemWideRuleDisable @@ -185,7 +185,7 @@ func (storage DBStorage) ReadDisabledRule( // ListOfSystemWideDisabledRules function returns list of all rules that have been // disabled for all clusters by given user 
-func (storage DBStorage) ListOfSystemWideDisabledRules( +func (storage OCPRecommendationsDBStorage) ListOfSystemWideDisabledRules( orgID types.OrgID, ) ([]ctypes.SystemWideRuleDisable, error) { disabledRules := make([]ctypes.SystemWideRuleDisable, 0) diff --git a/storage/rule_disable_test.go b/storage/rule_disable_test.go index e87616216..2689d55da 100644 --- a/storage/rule_disable_test.go +++ b/storage/rule_disable_test.go @@ -29,7 +29,7 @@ import ( // Check the method DisableRuleSystemWide. func TestDBStorageDisableRuleSystemWide(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // try to call the method err := mockStorage.DisableRuleSystemWide( @@ -45,7 +45,7 @@ func TestDBStorageDisableRuleSystemWide(t *testing.T) { // Check the method DisableRuleSystemWide in case of DB error. func TestDBStorageDisableRuleSystemWideOnDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() @@ -60,7 +60,7 @@ func TestDBStorageDisableRuleSystemWideOnDBError(t *testing.T) { // Check the method EnableRuleSystemWide. 
func TestDBStorageEnableRuleSystemWide(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // try to call the method err := mockStorage.EnableRuleSystemWide( @@ -78,7 +78,7 @@ func TestDBStorageEnableRuleSystemWide(t *testing.T) { // This shouldn't happen in real environment because // Re-enabling/Updating justification/Getting from the rule_disable table is used func TestDBStorageEnableRuleSystemWideDifferentUser(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) user1Justification := "first user reason" user2Justification := "second user reason" @@ -122,7 +122,7 @@ func TestDBStorageEnableRuleSystemWideDifferentUser(t *testing.T) { // Check the method EnableRuleSystemWide in case of DB error. func TestDBStorageEnableRuleSystemWideOnDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() @@ -137,7 +137,7 @@ func TestDBStorageEnableRuleSystemWideOnDBError(t *testing.T) { // Check the method UpdateDisabledRuleJustification. func TestDBStorageUpdateDisabledRuleJustifiction(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // try to call the method err := mockStorage.UpdateDisabledRuleJustification( @@ -153,7 +153,7 @@ func TestDBStorageUpdateDisabledRuleJustifiction(t *testing.T) { // Check the method UpdateDisabledRuleJustification in case of DB error. 
func TestDBStorageUpdateDisabledRuleJustification(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() @@ -168,7 +168,7 @@ func TestDBStorageUpdateDisabledRuleJustification(t *testing.T) { // Check the method ReadDisabledRule. func TestDBStorageReadDisabledRuleNoRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // try to call the method _, found, err := mockStorage.ReadDisabledRule( @@ -187,7 +187,7 @@ func TestDBStorageReadDisabledRuleNoRule(t *testing.T) { // Check the method ReadDisabledRule. func TestDBStorageReadDisabledRuleOneRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) const justification = "JUSTIFICATION" @@ -217,7 +217,7 @@ func TestDBStorageReadDisabledRuleOneRule(t *testing.T) { // Check the method ReadDisabledRule in case of DB error. func TestDBStorageReadDisabledRuleOnRBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() @@ -232,7 +232,7 @@ func TestDBStorageReadDisabledRuleOnRBError(t *testing.T) { // Check the method ListOfSystemWideDisabledRules. func TestDBStorageListOfSystemWideDisabledRulesNoRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // try to call the method list, err := mockStorage.ListOfSystemWideDisabledRules(testdata.OrgID) @@ -249,7 +249,7 @@ func TestDBStorageListOfSystemWideDisabledRulesNoRules(t *testing.T) { // Check the method ListOfSystemWideDisabledRules. 
func TestDBStorageListOfSystemWideDisabledRulesOneRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) const justification = "JUSTIFICATION" @@ -279,7 +279,7 @@ func TestDBStorageListOfSystemWideDisabledRulesOneRule(t *testing.T) { // Check the method ListOfSystemWideDisabledRules. func TestDBStorageListOfSystemWideDisabledRulesTwoRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) const justification = "JUSTIFICATION" @@ -318,7 +318,7 @@ func TestDBStorageListOfSystemWideDisabledRulesTwoRules(t *testing.T) { // Check the method ListOfSystemWideDisabledRules in case of DB error. func TestDBStorageListOfSystemWideDisabledRulesDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() diff --git a/storage/rule_feedback.go b/storage/rule_feedback.go index 119713749..68ae9cd66 100644 --- a/storage/rule_feedback.go +++ b/storage/rule_feedback.go @@ -39,7 +39,7 @@ type UserFeedbackOnRule struct { } // VoteOnRule likes or dislikes rule for cluster by user. If entry exists, it overwrites it -func (storage DBStorage) VoteOnRule( +func (storage OCPRecommendationsDBStorage) VoteOnRule( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, @@ -52,7 +52,7 @@ func (storage DBStorage) VoteOnRule( } // AddOrUpdateFeedbackOnRule adds feedback on rule for cluster by user. 
If entry exists, it overwrites it -func (storage DBStorage) AddOrUpdateFeedbackOnRule( +func (storage OCPRecommendationsDBStorage) AddOrUpdateFeedbackOnRule( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, @@ -65,7 +65,7 @@ func (storage DBStorage) AddOrUpdateFeedbackOnRule( // addOrUpdateUserFeedbackOnRuleForCluster adds or updates feedback // will update user vote and messagePtr if the pointers are not nil -func (storage DBStorage) addOrUpdateUserFeedbackOnRuleForCluster( +func (storage OCPRecommendationsDBStorage) addOrUpdateUserFeedbackOnRuleForCluster( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, @@ -121,11 +121,11 @@ func (storage DBStorage) addOrUpdateUserFeedbackOnRuleForCluster( return nil } -func (storage DBStorage) constructUpsertClusterRuleUserFeedback(updateVote, updateMessage bool) (string, error) { +func (storage OCPRecommendationsDBStorage) constructUpsertClusterRuleUserFeedback(updateVote, updateMessage bool) (string, error) { var query string switch storage.dbDriverType { - case types.DBDriverSQLite3, types.DBDriverPostgres, types.DBDriverGeneral: + case types.DBDriverPostgres, types.DBDriverGeneral: query = ` INSERT INTO cluster_rule_user_feedback (cluster_id, rule_id, user_id, user_vote, added_at, updated_at, message, error_key, org_id) @@ -155,7 +155,7 @@ func (storage DBStorage) constructUpsertClusterRuleUserFeedback(updateVote, upda } // GetUserFeedbackOnRule gets user feedback from DB -func (storage DBStorage) GetUserFeedbackOnRule( +func (storage OCPRecommendationsDBStorage) GetUserFeedbackOnRule( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, userID types.UserID, ) (*UserFeedbackOnRule, error) { feedback := UserFeedbackOnRule{} @@ -189,7 +189,7 @@ func (storage DBStorage) GetUserFeedbackOnRule( } // GetUserFeedbackOnRuleDisable gets user feedback from DB -func (storage DBStorage) GetUserFeedbackOnRuleDisable( +func (storage OCPRecommendationsDBStorage) 
GetUserFeedbackOnRuleDisable( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, userID types.UserID, ) (*UserFeedbackOnRule, error) { feedback := UserFeedbackOnRule{} @@ -222,7 +222,7 @@ func (storage DBStorage) GetUserFeedbackOnRuleDisable( } // GetUserFeedbackOnRules gets user feedbacks for defined array of rule IDs from DB -func (storage DBStorage) GetUserFeedbackOnRules( +func (storage OCPRecommendationsDBStorage) GetUserFeedbackOnRules( clusterID types.ClusterName, rulesReport []types.RuleOnReport, userID types.UserID, ) (map[types.RuleID]types.UserVote, error) { ruleIDs := make([]string, 0) @@ -266,7 +266,7 @@ func (storage DBStorage) GetUserFeedbackOnRules( } // GetUserDisableFeedbackOnRules gets user disable feedbacks for defined array of rule IDs from DB -func (storage DBStorage) GetUserDisableFeedbackOnRules( +func (storage OCPRecommendationsDBStorage) GetUserDisableFeedbackOnRules( clusterID types.ClusterName, rulesReport []types.RuleOnReport, userID types.UserID, ) (map[types.RuleID]UserFeedbackOnRule, error) { feedbacks := make(map[types.RuleID]UserFeedbackOnRule) @@ -287,7 +287,7 @@ func (storage DBStorage) GetUserDisableFeedbackOnRules( } // AddFeedbackOnRuleDisable adds feedback on rule disable -func (storage DBStorage) AddFeedbackOnRuleDisable( +func (storage OCPRecommendationsDBStorage) AddFeedbackOnRuleDisable( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, diff --git a/storage/rule_hit_preparation_test.go b/storage/rule_hit_preparation_test.go index aaab0795b..55b6803ce 100644 --- a/storage/rule_hit_preparation_test.go +++ b/storage/rule_hit_preparation_test.go @@ -25,7 +25,7 @@ import ( ) func TestDBStorage_getRuleHitInsertStatement(t *testing.T) { - fakeStorage := storage.NewFromConnection(nil, -1) + fakeStorage := storage.NewOCPRecommendationsFromConnection(nil, -1) r := fakeStorage.GetRuleHitInsertStatement(testdata.Report3RulesParsed) // 5*3 placeholders expected diff --git 
a/storage/rule_list.go b/storage/rule_list.go index dc5ba1f62..63f0f6776 100644 --- a/storage/rule_list.go +++ b/storage/rule_list.go @@ -37,7 +37,7 @@ type DisabledRuleReason struct { } // ListOfReasons function returns list of reasons for all disabled rules -func (storage DBStorage) ListOfReasons(userID types.UserID) ([]DisabledRuleReason, error) { +func (storage OCPRecommendationsDBStorage) ListOfReasons(userID types.UserID) ([]DisabledRuleReason, error) { reasons := make([]DisabledRuleReason, 0) query := `SELECT cluster_id, @@ -88,7 +88,7 @@ func (storage DBStorage) ListOfReasons(userID types.UserID) ([]DisabledRuleReaso // ListOfDisabledRules function returns list of all rules disabled from a // specified account. -func (storage DBStorage) ListOfDisabledRules(orgID types.OrgID) ([]ctypes.DisabledRule, error) { +func (storage OCPRecommendationsDBStorage) ListOfDisabledRules(orgID types.OrgID) ([]ctypes.DisabledRule, error) { disabledRules := make([]ctypes.DisabledRule, 0) query := `SELECT cluster_id, @@ -141,7 +141,7 @@ func (storage DBStorage) ListOfDisabledRules(orgID types.OrgID) ([]ctypes.Disabl // ListOfDisabledRulesForClusters function returns list of all rules disabled from a // specified account for given list of clusters. 
-func (storage DBStorage) ListOfDisabledRulesForClusters( +func (storage OCPRecommendationsDBStorage) ListOfDisabledRulesForClusters( clusterList []string, orgID types.OrgID, ) ([]ctypes.DisabledRule, error) { diff --git a/storage/rule_toggle.go b/storage/rule_toggle.go index e1e177bf0..7a108e26a 100644 --- a/storage/rule_toggle.go +++ b/storage/rule_toggle.go @@ -45,7 +45,7 @@ type ClusterRuleToggle struct { } // ToggleRuleForCluster toggles rule for specified cluster -func (storage DBStorage) ToggleRuleForCluster( +func (storage OCPRecommendationsDBStorage) ToggleRuleForCluster( clusterID types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, @@ -100,7 +100,7 @@ func (storage DBStorage) ToggleRuleForCluster( } // GetFromClusterRuleToggle gets a rule from cluster_rule_toggle -func (storage DBStorage) GetFromClusterRuleToggle( +func (storage OCPRecommendationsDBStorage) GetFromClusterRuleToggle( clusterID types.ClusterName, ruleID types.RuleID, ) (*ClusterRuleToggle, error) { var disabledRule ClusterRuleToggle @@ -146,7 +146,7 @@ func (storage DBStorage) GetFromClusterRuleToggle( } // GetTogglesForRules gets enable/disable toggle for rules -func (storage DBStorage) GetTogglesForRules( +func (storage OCPRecommendationsDBStorage) GetTogglesForRules( clusterID types.ClusterName, rulesReport []types.RuleOnReport, orgID types.OrgID, @@ -199,7 +199,7 @@ func (storage DBStorage) GetTogglesForRules( } // DeleteFromRuleClusterToggle deletes a record from the table rule_cluster_toggle. Only exposed in debug mode. 
-func (storage DBStorage) DeleteFromRuleClusterToggle( +func (storage OCPRecommendationsDBStorage) DeleteFromRuleClusterToggle( clusterID types.ClusterName, ruleID types.RuleID, ) error { query := ` diff --git a/storage/sql_hooks.go b/storage/sql_hooks.go index 4ec76a7c1..2e76e132b 100644 --- a/storage/sql_hooks.go +++ b/storage/sql_hooks.go @@ -91,7 +91,7 @@ func InitSQLDriverWithLogs( realDriver sql_driver.Driver, realDriverName string, ) string { - // linear search is not gonna be an issue since there's not many drivers + // linear search is not going to be an issue since there's not many drivers // and we call New() only ones/twice per process life foundHooksDriver := false hooksDriverName := realDriverName + "WithHooks" diff --git a/storage/sql_hooks_test.go b/storage/sql_hooks_test.go index c907fd599..f892a41de 100644 --- a/storage/sql_hooks_test.go +++ b/storage/sql_hooks_test.go @@ -25,7 +25,6 @@ import ( "time" "github.com/lib/pq" - "github.com/mattn/go-sqlite3" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" @@ -38,12 +37,6 @@ func TestInitSQLDriverWithLogs(t *testing.T) { zerolog.SetGlobalLevel(zerolog.DebugLevel) driverName := storage.InitSQLDriverWithLogs( - &sqlite3.SQLiteDriver{}, - "sqlite3", - ) - assert.Equal(t, "sqlite3WithHooks", driverName) - - driverName = storage.InitSQLDriverWithLogs( &pq.Driver{}, "postgres", ) @@ -57,10 +50,10 @@ func TestInitSQLDriverWithLogsMultipleCalls(t *testing.T) { for i := 0; i < 10; i++ { driverName := storage.InitSQLDriverWithLogs( - &sqlite3.SQLiteDriver{}, - "sqlite3", + &pq.Driver{}, + "postgres", ) - assert.Equal(t, "sqlite3WithHooks", driverName) + assert.Equal(t, "postgresWithHooks", driverName) } } diff --git a/storage/storage.go b/storage/storage.go index e00bdfc07..7b81379fc 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -1,5 +1,5 @@ /* -Copyright © 2020, 2021, 2022, 2023 Red Hat, Inc. +Copyright © 2023 Red Hat, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,1502 +13,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and */ -// Package storage contains an implementation of interface between Go code and -// (almost any) SQL database like PostgreSQL, SQLite, or MariaDB. An implementation -// named DBStorage is constructed via New function and it is mandatory to call Close -// for any opened connection to database. The storage might be initialized by Init -// method if database schema is empty. -// -// It is possible to configure connection to selected database by using Configuration -// structure. Currently that structure contains two configurable parameter: -// -// Driver - a SQL driver, like "sqlite3", "pq" etc. -// DataSource - specification of data source. The content of this parameter depends on the database used. 
package storage import ( "database/sql" - sql_driver "database/sql/driver" - "encoding/json" - "fmt" - "strings" - "time" - "github.com/Shopify/sarama" - "github.com/lib/pq" // PostgreSQL database driver - "github.com/mattn/go-sqlite3" // SQLite database driver - "github.com/rs/zerolog/log" - - "github.com/RedHatInsights/insights-operator-utils/redis" - ctypes "github.com/RedHatInsights/insights-results-types" - - "github.com/RedHatInsights/insights-results-aggregator/metrics" "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/types" ) +// PostgreSQL database driver + // Storage represents an interface to almost any database or storage system type Storage interface { Init() error Close() error - ListOfOrgs() ([]types.OrgID, error) - ListOfClustersForOrg( - orgID types.OrgID, timeLimit time.Time) ([]types.ClusterName, error, - ) - ListOfClustersForOrgSpecificRule( - orgID types.OrgID, ruleID types.RuleSelector, activeClusters []string, - ) ([]ctypes.HittingClustersData, error) - ReadReportForCluster( - orgID types.OrgID, clusterName types.ClusterName) ( - []types.RuleOnReport, types.Timestamp, types.Timestamp, types.Timestamp, error, - ) - ReadReportInfoForCluster( - types.OrgID, types.ClusterName) ( - types.Version, error, - ) - ReadClusterVersionsForClusterList( - types.OrgID, []string, - ) (map[types.ClusterName]types.Version, error) - ReadReportsForClusters( - clusterNames []types.ClusterName) (map[types.ClusterName]types.ClusterReport, error) - ReadOrgIDsForClusters( - clusterNames []types.ClusterName) ([]types.OrgID, error) - ReadSingleRuleTemplateData( - orgID types.OrgID, clusterName types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, - ) (interface{}, error) - ReadReportForClusterByClusterName(clusterName types.ClusterName) ([]types.RuleOnReport, types.Timestamp, error) - WriteReportForCluster( - orgID types.OrgID, - clusterName types.ClusterName, - report 
types.ClusterReport, - rules []types.ReportItem, - collectedAtTime time.Time, - gatheredAtTime time.Time, - storedAtTime time.Time, - requestID types.RequestID, - ) error - WriteReportInfoForCluster( - types.OrgID, - types.ClusterName, - []types.InfoItem, - time.Time, - ) error - WriteRecommendationsForCluster( - orgID types.OrgID, - clusterName types.ClusterName, - report types.ClusterReport, - creationTime types.Timestamp, - ) error - ReportsCount() (int, error) - VoteOnRule( - clusterID types.ClusterName, - ruleID types.RuleID, - errorKey types.ErrorKey, - orgID types.OrgID, - userID types.UserID, - userVote types.UserVote, - voteMessage string, - ) error - AddOrUpdateFeedbackOnRule( - clusterID types.ClusterName, - ruleID types.RuleID, - errorKey types.ErrorKey, - orgID types.OrgID, - userID types.UserID, - message string, - ) error - AddFeedbackOnRuleDisable( - clusterID types.ClusterName, - ruleID types.RuleID, - errorKey types.ErrorKey, - orgID types.OrgID, - userID types.UserID, - message string, - ) error - GetUserFeedbackOnRule( - clusterID types.ClusterName, - ruleID types.RuleID, - errorKey types.ErrorKey, - userID types.UserID, - ) (*UserFeedbackOnRule, error) - GetUserFeedbackOnRuleDisable( - clusterID types.ClusterName, - ruleID types.RuleID, - errorKey types.ErrorKey, - userID types.UserID, - ) (*UserFeedbackOnRule, error) - DeleteReportsForOrg(orgID types.OrgID) error - DeleteReportsForCluster(clusterName types.ClusterName) error - ToggleRuleForCluster( - clusterID types.ClusterName, - ruleID types.RuleID, - errorKey types.ErrorKey, - orgID types.OrgID, - ruleToggle RuleToggle, - ) error - GetFromClusterRuleToggle( - types.ClusterName, - types.RuleID, - ) (*ClusterRuleToggle, error) - GetTogglesForRules( - clusterID types.ClusterName, - rulesReport []types.RuleOnReport, - orgID types.OrgID, - ) (map[types.RuleID]bool, error) - DeleteFromRuleClusterToggle( - clusterID types.ClusterName, - ruleID types.RuleID, - ) error - GetOrgIDByClusterID(cluster 
types.ClusterName) (types.OrgID, error) - WriteConsumerError(msg *sarama.ConsumerMessage, consumerErr error) error - GetUserFeedbackOnRules( - clusterID types.ClusterName, - rulesReport []types.RuleOnReport, - userID types.UserID, - ) (map[types.RuleID]types.UserVote, error) - GetUserDisableFeedbackOnRules( - clusterID types.ClusterName, - rulesReport []types.RuleOnReport, - userID types.UserID, - ) (map[types.RuleID]UserFeedbackOnRule, error) - DoesClusterExist(clusterID types.ClusterName) (bool, error) - ListOfDisabledRules(orgID types.OrgID) ([]ctypes.DisabledRule, error) - ListOfReasons(userID types.UserID) ([]DisabledRuleReason, error) - ListOfDisabledRulesForClusters( - clusterList []string, - orgID types.OrgID, - ) ([]ctypes.DisabledRule, error) - ListOfDisabledClusters( - orgID types.OrgID, - ruleID types.RuleID, - errorKey types.ErrorKey, - ) ([]ctypes.DisabledClusterInfo, error) - RateOnRule( - types.OrgID, - types.RuleID, - types.ErrorKey, - types.UserVote, - ) error - GetRuleRating( - types.OrgID, - types.RuleSelector, - ) (types.RuleRating, error) - DisableRuleSystemWide( - orgID types.OrgID, ruleID types.RuleID, - errorKey types.ErrorKey, justification string, - ) error - EnableRuleSystemWide( - orgID types.OrgID, - ruleID types.RuleID, - errorKey types.ErrorKey, - ) error - UpdateDisabledRuleJustification( - orgID types.OrgID, - ruleID types.RuleID, - errorKey types.ErrorKey, - justification string, - ) error - ReadDisabledRule( - orgID types.OrgID, ruleID types.RuleID, errorKey types.ErrorKey, - ) (ctypes.SystemWideRuleDisable, bool, error) - ListOfSystemWideDisabledRules( - orgID types.OrgID, - ) ([]ctypes.SystemWideRuleDisable, error) - ReadRecommendationsForClusters([]string, types.OrgID) (ctypes.RecommendationImpactedClusters, error) - ReadClusterListRecommendations(clusterList []string, orgID types.OrgID) ( - ctypes.ClusterRecommendationMap, error, - ) - MigrateToLatest() error GetConnection() *sql.DB - PrintRuleDisableDebugInfo() + 
GetMigrations() []migration.Migration GetDBDriverType() types.DBDriver -} - -// ReportSuffix is used to strip away .report suffix from rule module names -const ReportSuffix = ".report" - -// DBStorage is an implementation of Storage interface that use selected SQL like database -// like SQLite, PostgreSQL, MariaDB, RDS etc. That implementation is based on the standard -// sql package. It is possible to configure connection via Configuration structure. -// SQLQueriesLog is log for sql queries, default is nil which means nothing is logged -type DBStorage struct { - connection *sql.DB - dbDriverType types.DBDriver - // clusterLastCheckedDict is a dictionary of timestamps when the clusters were last checked. - clustersLastChecked map[types.ClusterName]time.Time -} - -// New function creates and initializes a new instance of Storage interface -func New(configuration Configuration) (Storage, error) { - switch configuration.Type { - case types.SQLStorage: - return newSQLStorage(configuration) - case types.RedisStorage: - return newRedisStorage(configuration) - case types.NoopStorage: - return newNoopStorage(configuration) - default: - // error to be thrown - err := fmt.Errorf("Unknown storage type '%s'", configuration.Type) - log.Error().Err(err).Msg("Init failure") - return nil, err - } -} - -// newNoopStorage function creates and initializes a new instance of Noop storage -func newNoopStorage(_ Configuration) (Storage, error) { - return &NoopStorage{}, nil -} - -// newRedisStorage function creates and initializes a new instance of Redis storage -func newRedisStorage(configuration Configuration) (Storage, error) { - redisCfg := configuration.RedisConfiguration - log.Info(). - Str("Endpoint", redisCfg.RedisEndpoint). - Int("Database index", redisCfg.RedisDatabase). 
- Msg("Making connection to Redis storage") - - // pass for unit tests - if redisCfg.RedisEndpoint == "" { - return &RedisStorage{}, nil - } - - client, err := redis.CreateRedisClient( - redisCfg.RedisEndpoint, - redisCfg.RedisDatabase, - redisCfg.RedisPassword, - redisCfg.RedisTimeoutSeconds, - ) - // check for init error - if err != nil { - log.Error().Err(err).Msg("Error constructing Redis client") - return nil, err - } - - log.Info().Msg("Redis client has been initialized") - - redisStorage := &RedisStorage{ - Client: redis.Client{Connection: client}, - } - - err = redisStorage.Init() - if err != nil { - log.Error().Err(err).Msg("Error initializing Redis client") - return nil, err - } - return redisStorage, nil -} - -// newSQLStorage function creates and initializes a new instance of DB storage -func newSQLStorage(configuration Configuration) (Storage, error) { - driverType, driverName, dataSource, err := initAndGetDriver(configuration) - if err != nil { - return nil, err - } - - log.Info().Msgf( - "Making connection to data storage, driver=%s", - driverName, - ) - - connection, err := sql.Open(driverName, dataSource) - if err != nil { - log.Error().Err(err).Msg("Can not connect to data storage") - return nil, err - } - - return NewFromConnection(connection, driverType), nil -} - -// NewFromConnection function creates and initializes a new instance of Storage interface from prepared connection -func NewFromConnection(connection *sql.DB, dbDriverType types.DBDriver) *DBStorage { - return &DBStorage{ - connection: connection, - dbDriverType: dbDriverType, - clustersLastChecked: map[types.ClusterName]time.Time{}, - } -} - -// initAndGetDriver initializes driver(with logs if logSQLQueries is true), -// checks if it's supported and returns driver type, driver name, dataSource and error -func initAndGetDriver(configuration Configuration) (driverType types.DBDriver, driverName, dataSource string, err error) { - var driver sql_driver.Driver - driverName = 
configuration.Driver - - switch driverName { - case "sqlite3": - driverType = types.DBDriverSQLite3 - driver = &sqlite3.SQLiteDriver{} - dataSource = configuration.SQLiteDataSource - case "postgres": - driverType = types.DBDriverPostgres - driver = &pq.Driver{} - dataSource = fmt.Sprintf( - "postgresql://%v:%v@%v:%v/%v?%v", - configuration.PGUsername, - configuration.PGPassword, - configuration.PGHost, - configuration.PGPort, - configuration.PGDBName, - configuration.PGParams, - ) - default: - err = fmt.Errorf("driver %v is not supported", driverName) - return - } - - if configuration.LogSQLQueries { - driverName = InitSQLDriverWithLogs(driver, driverName) - } - - return -} - -// MigrateToLatest migrates the database to the latest available -// migration version. This must be done before an Init() call. -func (storage DBStorage) MigrateToLatest() error { - if err := migration.InitInfoTable(storage.connection); err != nil { - return err - } - - return migration.SetDBVersion(storage.connection, storage.dbDriverType, migration.GetMaxVersion()) -} - -// Init performs all database initialization -// tasks necessary for further service operation. -func (storage DBStorage) Init() error { - // Read clusterName:LastChecked dictionary from DB. - rows, err := storage.connection.Query("SELECT cluster, last_checked_at FROM report;") - if err != nil { - return err - } - - for rows.Next() { - var ( - clusterName types.ClusterName - lastChecked time.Time - ) - - if err := rows.Scan(&clusterName, &lastChecked); err != nil { - if closeErr := rows.Close(); closeErr != nil { - log.Error().Err(closeErr).Msg("Unable to close the DB rows handle") - } - return err - } - - storage.clustersLastChecked[clusterName] = lastChecked - } - - // Not using defer to close the rows here to: - // - make errcheck happy (it doesn't like ignoring returned errors), - // - return a possible error returned by the Close method. - return rows.Close() -} - -// Close method closes the connection to database. 
Needs to be called at the end of application lifecycle. -func (storage DBStorage) Close() error { - log.Info().Msg("Closing connection to data storage") - if storage.connection != nil { - err := storage.connection.Close() - if err != nil { - log.Error().Err(err).Msg("Can not close connection to data storage") - return err - } - } - return nil -} - -// Report represents one (latest) cluster report. -// -// Org: organization ID -// Name: cluster GUID in the following format: -// c8590f31-e97e-4b85-b506-c45ce1911a12 -type Report struct { - Org types.OrgID `json:"org"` - Name types.ClusterName `json:"cluster"` - Report types.ClusterReport `json:"report"` - ReportedAt types.Timestamp `json:"reported_at"` -} - -func closeRows(rows *sql.Rows) { - _ = rows.Close() -} - -// ListOfOrgs reads list of all organizations that have at least one cluster report -func (storage DBStorage) ListOfOrgs() ([]types.OrgID, error) { - orgs := make([]types.OrgID, 0) - - rows, err := storage.connection.Query("SELECT DISTINCT org_id FROM report ORDER BY org_id;") - err = types.ConvertDBError(err, nil) - if err != nil { - return orgs, err - } - defer closeRows(rows) - - for rows.Next() { - var orgID types.OrgID - - err = rows.Scan(&orgID) - if err == nil { - orgs = append(orgs, orgID) - } else { - log.Error().Err(err).Msg("ListOfOrgID") - } - } - return orgs, nil -} - -// ListOfClustersForOrg reads list of all clusters fro given organization -func (storage DBStorage) ListOfClustersForOrg(orgID types.OrgID, timeLimit time.Time) ([]types.ClusterName, error) { - clusters := make([]types.ClusterName, 0) - - q := ` - SELECT cluster - FROM report - WHERE org_id = $1 - AND reported_at >= $2 - ORDER BY cluster; - ` - - rows, err := storage.connection.Query(q, orgID, timeLimit) - - err = types.ConvertDBError(err, orgID) - if err != nil { - return clusters, err - } - defer closeRows(rows) - - for rows.Next() { - var clusterName string - - err = rows.Scan(&clusterName) - if err == nil { - clusters = 
append(clusters, types.ClusterName(clusterName)) - } else { - log.Error().Err(err).Msg("ListOfClustersForOrg") - } - } - return clusters, nil -} - -// ListOfClustersForOrgSpecificRule returns list of all clusters for given organization that are affect by given rule -func (storage DBStorage) ListOfClustersForOrgSpecificRule( - orgID types.OrgID, - ruleID types.RuleSelector, - activeClusters []string) ( - []ctypes.HittingClustersData, error) { - results := make([]ctypes.HittingClustersData, 0) - - var whereClause string - if len(activeClusters) > 0 { - // #nosec G201 - whereClause = fmt.Sprintf(`WHERE org_id = $1 AND rule_id = $2 AND cluster_id IN (%v)`, - inClauseFromSlice(activeClusters)) - } else { - whereClause = `WHERE org_id = $1 AND rule_id = $2` - } - // #nosec G202 - query := `SELECT cluster_id, created_at, impacted_since FROM recommendation ` + whereClause + ` ORDER BY cluster_id;` - - // #nosec G202 - rows, err := storage.connection.Query(query, orgID, ruleID) - - err = types.ConvertDBError(err, orgID) - if err != nil { - return results, err - } - - defer closeRows(rows) - - var ( - clusterName types.ClusterName - lastSeen string - impactedSince string - ) - for rows.Next() { - err = rows.Scan(&clusterName, &lastSeen, &impactedSince) - if err != nil { - log.Error().Err(err).Msg("ListOfClustersForOrgSpecificRule") - } - results = append(results, ctypes.HittingClustersData{ - Cluster: clusterName, - LastSeen: lastSeen, - ImpactedSince: impactedSince, - }) - } - - // This is to ensure 404 when no recommendation is found for the given orgId + selector. 
- // We can, alternatively, return something like this with a 204 (no content): - // {"data":[],"meta":{"count":0,"component":"test.rule","error_key":"ek"},"status":"not_found"} - if len(results) == 0 { - return results, &types.ItemNotFoundError{ItemID: ruleID} - } - return results, nil -} - -// GetOrgIDByClusterID reads OrgID for specified cluster -func (storage DBStorage) GetOrgIDByClusterID(cluster types.ClusterName) (types.OrgID, error) { - row := storage.connection.QueryRow("SELECT org_id FROM report WHERE cluster = $1 ORDER BY org_id;", cluster) - - var orgID uint64 - err := row.Scan(&orgID) - if err != nil { - log.Error().Err(err).Msg("GetOrgIDByClusterID") - return 0, err - } - return types.OrgID(orgID), nil -} - -// parseTemplateData parses template data and returns a json raw message if it's a json or a string otherwise -func parseTemplateData(templateData []byte) interface{} { - var templateDataJSON json.RawMessage - - err := json.Unmarshal(templateData, &templateDataJSON) - if err != nil { - log.Warn().Err(err).Msgf("unable to parse template data as json") - return templateData - } - - return templateDataJSON -} - -func parseRuleRows(rows *sql.Rows) ([]types.RuleOnReport, error) { - report := make([]types.RuleOnReport, 0) - - for rows.Next() { - var ( - templateDataBytes []byte - ruleFQDN types.RuleID - errorKey types.ErrorKey - createdAt sql.NullTime - ) - - err := rows.Scan(&templateDataBytes, &ruleFQDN, &errorKey, &createdAt) - if err != nil { - log.Error().Err(err).Msg("ReportListForCluster") - return report, err - } - - templateData := parseTemplateData(templateDataBytes) - var createdAtConverted time.Time - if createdAt.Valid { - createdAtConverted = createdAt.Time - } - rule := types.RuleOnReport{ - Module: ruleFQDN, - ErrorKey: errorKey, - TemplateData: templateData, - CreatedAt: types.Timestamp(createdAtConverted.UTC().Format(time.RFC3339)), - } - - report = append(report, rule) - } - - return report, nil -} - -// constructInClausule is a 
helper function to construct `in` clause for SQL -// statement. -func constructInClausule(howMany int) (string, error) { - // construct the `in` clause in SQL query statement - if howMany < 1 { - return "", fmt.Errorf("at least one value needed") - } - inClausule := "$1" - for i := 2; i <= howMany; i++ { - inClausule += fmt.Sprintf(",$%d", i) - } - return inClausule, nil -} - -// argsWithClusterNames is a helper function to construct arguments for SQL -// statement. -func argsWithClusterNames(clusterNames []types.ClusterName) []interface{} { - // prepare arguments - args := make([]interface{}, len(clusterNames)) - - for i, clusterName := range clusterNames { - args[i] = clusterName - } - return args -} - -// inClauseFromSlice is a helper function to construct `in` clause for SQL -// statement from a given slice of items. The received slice must be []string -// or any other type that can be asserted to []string, or else '1=1' will be -// returned, making the IN clause act like a wildcard. -func inClauseFromSlice(slice interface{}) string { - if slice, ok := slice.([]string); ok { - return "'" + strings.Join(slice, `','`) + `'` - } - return "1=1" -} - -/* -func updateRecommendationsMetrics(cluster string, deleted float64, inserted float64) { - metrics.SQLRecommendationsDeletes.WithLabelValues(cluster).Observe(deleted) - metrics.SQLRecommendationsInserts.WithLabelValues(cluster).Observe(inserted) -} -*/ - -// ReadOrgIDsForClusters read organization IDs for given list of cluster names. 
-func (storage DBStorage) ReadOrgIDsForClusters(clusterNames []types.ClusterName) ([]types.OrgID, error) { - // stub for return value - ids := make([]types.OrgID, 0) - - if len(clusterNames) < 1 { - return ids, nil - } - - // prepare arguments - args := argsWithClusterNames(clusterNames) - - // construct the `in` clause in SQL query statement - inClausule, err := constructInClausule(len(clusterNames)) - if err != nil { - log.Error().Err(err).Msg(inClauseError) - return ids, err - } - - // disable "G202 (CWE-89): SQL string concatenation" - // #nosec G202 - query := "SELECT DISTINCT org_id FROM report WHERE cluster in (" + inClausule + ");" - - // select results from the database - // #nosec G202 - rows, err := storage.connection.Query(query, args...) - if err != nil { - log.Error().Err(err).Msg("query to get org ids") - return ids, err - } - - // process results returned from database - for rows.Next() { - var orgID types.OrgID - - err := rows.Scan(&orgID) - if err != nil { - log.Error().Err(err).Msg("read one org id") - return ids, err - } - - ids = append(ids, orgID) - } - - // everything seems ok -> return ids - return ids, nil -} - -// ReadReportsForClusters function reads reports for given list of cluster -// names. 
-func (storage DBStorage) ReadReportsForClusters(clusterNames []types.ClusterName) (map[types.ClusterName]types.ClusterReport, error) { - // stub for return value - reports := make(map[types.ClusterName]types.ClusterReport) - - if len(clusterNames) < 1 { - return reports, nil - } - - // prepare arguments - args := argsWithClusterNames(clusterNames) - - // construct the `in` clause in SQL query statement - inClausule, err := constructInClausule(len(clusterNames)) - if err != nil { - log.Error().Err(err).Msg(inClauseError) - return reports, err - } - - // disable "G202 (CWE-89): SQL string concatenation" - // #nosec G202 - query := "SELECT cluster, report FROM report WHERE cluster in (" + inClausule + ");" - - // select results from the database - // #nosec G202 - rows, err := storage.connection.Query(query, args...) - if err != nil { - return reports, err - } - - // process results returned from database - for rows.Next() { - // convert into requested type - var ( - clusterName types.ClusterName - clusterReport types.ClusterReport - ) - - err := rows.Scan(&clusterName, &clusterReport) - if err != nil { - log.Error().Err(err).Msg("ReadReportsForClusters") - return reports, err - } - - reports[clusterName] = clusterReport - } - - // everything seems ok -> return reports - return reports, nil -} - -// ReadReportForCluster reads result (health status) for selected cluster -func (storage DBStorage) ReadReportForCluster( - orgID types.OrgID, clusterName types.ClusterName, -) ([]types.RuleOnReport, types.Timestamp, types.Timestamp, types.Timestamp, error) { - var lastChecked time.Time - var reportedAt time.Time - var gatheredAtInDB sql.NullTime // to avoid problems - - report := make([]types.RuleOnReport, 0) - - err := storage.connection.QueryRow( - "SELECT last_checked_at, reported_at, gathered_at FROM report WHERE org_id = $1 AND cluster = $2;", - orgID, clusterName, - ).Scan(&lastChecked, &reportedAt, &gatheredAtInDB) - - // convert timestamps to string - var 
lastCheckedStr = types.Timestamp(lastChecked.UTC().Format(time.RFC3339)) - var reportedAtStr = types.Timestamp(reportedAt.UTC().Format(time.RFC3339)) - var gatheredAtStr types.Timestamp - - if gatheredAtInDB.Valid { - gatheredAtStr = types.Timestamp(gatheredAtInDB.Time.UTC().Format(time.RFC3339)) - } else { - gatheredAtStr = "" - } - - err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) - if err != nil { - log.Error().Err(err).Str(clusterKey, string(clusterName)).Msg( - "ReadReportForCluster query from report table error", - ) - return report, lastCheckedStr, reportedAtStr, gatheredAtStr, err - } - - rows, err := storage.connection.Query( - "SELECT template_data, rule_fqdn, error_key, created_at FROM rule_hit WHERE org_id = $1 AND cluster_id = $2;", orgID, clusterName, - ) - - err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) - if err != nil { - log.Error().Err(err).Str(clusterKey, string(clusterName)).Msg( - "ReadReportForCluster query from rule_hit table error", - ) - return report, lastCheckedStr, reportedAtStr, gatheredAtStr, err - } - - report, err = parseRuleRows(rows) - - return report, lastCheckedStr, reportedAtStr, gatheredAtStr, err -} - -// ReadSingleRuleTemplateData reads template data for a single rule -func (storage DBStorage) ReadSingleRuleTemplateData( - orgID types.OrgID, clusterName types.ClusterName, ruleID types.RuleID, errorKey types.ErrorKey, -) (interface{}, error) { - var templateDataBytes []byte - - err := storage.connection.QueryRow(` - SELECT template_data FROM rule_hit - WHERE org_id = $1 AND cluster_id = $2 AND rule_fqdn = $3 AND error_key = $4; - `, - orgID, - clusterName, - ruleID, - errorKey, - ).Scan(&templateDataBytes) - err = types.ConvertDBError(err, []interface{}{orgID, clusterName, ruleID, errorKey}) - - return parseTemplateData(templateDataBytes), err -} - -// ReadReportForClusterByClusterName reads result (health status) for selected cluster for given organization -func (storage DBStorage) 
ReadReportForClusterByClusterName( - clusterName types.ClusterName, -) ([]types.RuleOnReport, types.Timestamp, error) { - report := make([]types.RuleOnReport, 0) - var lastChecked time.Time - - err := storage.connection.QueryRow( - "SELECT last_checked_at FROM report WHERE cluster = $1;", clusterName, - ).Scan(&lastChecked) - - switch { - case err == sql.ErrNoRows: - return report, "", &types.ItemNotFoundError{ - ItemID: fmt.Sprintf("%v", clusterName), - } - case err != nil: - return report, "", err - } - - rows, err := storage.connection.Query( - "SELECT template_data, rule_fqdn, error_key, created_at FROM rule_hit WHERE cluster_id = $1;", clusterName, - ) - - if err != nil { - return report, types.Timestamp(lastChecked.UTC().Format(time.RFC3339)), err - } - - report, err = parseRuleRows(rows) - - return report, types.Timestamp(lastChecked.UTC().Format(time.RFC3339)), err -} - -// GetRuleHitInsertStatement method prepares DB statement to be used to write -// rule FQDN + rule error key into rule_hit table for given cluster_id -func (storage DBStorage) GetRuleHitInsertStatement(rules []types.ReportItem) string { - const ruleInsertStatement = "INSERT INTO rule_hit(org_id, cluster_id, rule_fqdn, error_key, template_data, created_at) VALUES %s" - - // pre-allocate array for placeholders - placeholders := make([]string, len(rules)) - - // fill-in placeholders for INSERT statement - for index := range rules { - placeholders[index] = fmt.Sprintf("($%d,$%d,$%d,$%d,$%d,$%d)", - index*6+1, - index*6+2, - index*6+3, - index*6+4, - index*6+5, - index*6+6, - ) - } - - // construct INSERT statement for multiple values - return fmt.Sprintf(ruleInsertStatement, strings.Join(placeholders, ",")) -} - -// valuesForRuleHitsInsert function prepares values to insert rules into -// rule_hit table. 
-func valuesForRuleHitsInsert( - orgID types.OrgID, - clusterName types.ClusterName, - rules []types.ReportItem, - ruleKeyCreatedAt map[string]types.Timestamp, -) []interface{} { - // fill-in values for INSERT statement - values := make([]interface{}, len(rules)*6) - - for index, rule := range rules { - ruleKey := string(rule.Module) + string(rule.ErrorKey) - var impactedSince types.Timestamp - if val, ok := ruleKeyCreatedAt[ruleKey]; ok { - impactedSince = val - } else { - impactedSince = types.Timestamp(time.Now().UTC().Format(time.RFC3339)) - } - values[6*index] = orgID - values[6*index+1] = clusterName - values[6*index+2] = rule.Module - values[6*index+3] = rule.ErrorKey - values[6*index+4] = string(rule.TemplateData) - values[6*index+5] = impactedSince - } - return values -} - -func (storage DBStorage) updateReport( - tx *sql.Tx, - orgID types.OrgID, - clusterName types.ClusterName, - report types.ClusterReport, - rules []types.ReportItem, - lastCheckedTime time.Time, - gatheredAt time.Time, - reportedAtTime time.Time, -) error { - // Get the UPSERT query for writing a report into the database. - reportUpsertQuery := storage.getReportUpsertQuery() - - // Get created_at if present before deletion - query := "SELECT rule_fqdn, error_key, created_at FROM rule_hit WHERE org_id = $1 AND cluster_id = $2;" - RuleKeyCreatedAt, err := storage.getRuleKeyCreatedAtMap( - query, orgID, clusterName, - ) - if err != nil { - log.Error().Err(err).Msgf("Unable to get recommendation impacted_since") - RuleKeyCreatedAt = make(map[string]types.Timestamp) // create empty map - } - - deleteQuery := "DELETE FROM rule_hit WHERE org_id = $1 AND cluster_id = $2;" - _, err = tx.Exec(deleteQuery, orgID, clusterName) - if err != nil { - log.Err(err).Msgf("Unable to remove previous cluster reports (org: %v, cluster: %v)", orgID, clusterName) - return err - } - - // Perform the report insert. 
- // All older rule hits has been deleted for given cluster so it is - // possible to just insert new hits w/o the need to update on conflict - if len(rules) > 0 { - // Get the INSERT statement for writing a rule into the database. - ruleInsertStatement := storage.GetRuleHitInsertStatement(rules) - - // Get values to be stored in rule_hits table - values := valuesForRuleHitsInsert(orgID, clusterName, rules, RuleKeyCreatedAt) - - _, err = tx.Exec(ruleInsertStatement, values...) - if err != nil { - log.Err(err).Msgf("Unable to insert the cluster report rules (org: %v, cluster: %v)", - orgID, clusterName, - ) - return err - } - } - - if gatheredAt.IsZero() { - _, err = tx.Exec(reportUpsertQuery, orgID, clusterName, report, reportedAtTime, lastCheckedTime, 0, sql.NullTime{Valid: false}) - } else { - _, err = tx.Exec(reportUpsertQuery, orgID, clusterName, report, reportedAtTime, lastCheckedTime, 0, gatheredAt) - } - - if err != nil { - log.Err(err).Msgf("Unable to upsert the cluster report (org: %v, cluster: %v)", orgID, clusterName) - return err - } - - return nil -} - -func prepareInsertRecommendationsStatement( - orgID types.OrgID, - clusterName types.ClusterName, - report types.ReportRules, - createdAt types.Timestamp, - impactedSinceMap map[string]types.Timestamp, -) (selectors []string, statement string, statementArgs []interface{}) { - statement = `INSERT INTO recommendation (org_id, cluster_id, rule_fqdn, error_key, rule_id, created_at, impacted_since) VALUES %s` - - valuesIdx := make([]string, len(report.HitRules)) - statementIdx := 0 - selectors = make([]string, len(report.HitRules)) - - for idx, rule := range report.HitRules { - ruleFqdn := strings.TrimSuffix(string(rule.Module), ReportSuffix) - ruleID := ruleFqdn + "|" + string(rule.ErrorKey) - impactedSince, ok := impactedSinceMap[ruleFqdn+string(rule.ErrorKey)] - if !ok { - impactedSince = createdAt - } - selectors[idx] = ruleID - statementArgs = append(statementArgs, orgID, clusterName, ruleFqdn, 
rule.ErrorKey, ruleID, createdAt, impactedSince) - statementIdx = len(statementArgs) - const separatorAndParam = ", $" - valuesIdx[idx] = "($" + fmt.Sprint(statementIdx-6) + - separatorAndParam + fmt.Sprint(statementIdx-5) + - separatorAndParam + fmt.Sprint(statementIdx-4) + - separatorAndParam + fmt.Sprint(statementIdx-3) + - separatorAndParam + fmt.Sprint(statementIdx-2) + - separatorAndParam + fmt.Sprint(statementIdx-1) + - separatorAndParam + fmt.Sprint(statementIdx) + ")" - } - - statement = fmt.Sprintf(statement, strings.Join(valuesIdx, ",")) - return -} - -func (storage DBStorage) insertRecommendations( - tx *sql.Tx, - orgID types.OrgID, - clusterName types.ClusterName, - report types.ReportRules, - createdAt types.Timestamp, - impactedSince map[string]types.Timestamp, -) (inserted int, err error) { - if len(report.HitRules) == 0 { - log.Info(). - Int(organizationKey, int(orgID)). - Str(clusterKey, string(clusterName)). - Int(issuesCountKey, 0). - Msg("No new recommendation to insert") - return 0, nil - } - - selectors, statement, args := prepareInsertRecommendationsStatement(orgID, clusterName, report, createdAt, impactedSince) - - if _, err = tx.Exec(statement, args...); err != nil { - log.Error(). - Int(organizationKey, int(orgID)). - Str(clusterKey, string(clusterName)). - Int(issuesCountKey, inserted). - Interface(createdAtKey, createdAt). - Strs(selectorsKey, selectors). - Err(err). - Msg("Unable to insert the recommendations") - return 0, err - } - log.Info(). - Int(organizationKey, int(orgID)). - Str(clusterKey, string(clusterName)). - Int(issuesCountKey, inserted). - Interface(createdAtKey, createdAt). - Strs(selectorsKey, selectors). 
- Msg("Recommendations inserted successfully") - - inserted = len(selectors) - return -} - -// getRuleKeyCreatedAtMap returns a map between -// (rule_fqdn, error_key) -> created_at -// for each rule_hit rows matching given -// orgId and clusterName -func (storage DBStorage) getRuleKeyCreatedAtMap( - query string, - orgID types.OrgID, - clusterName types.ClusterName, -) ( - map[string]types.Timestamp, - error) { - impactedSinceRows, err := storage.connection.Query( - query, orgID, clusterName) - if err != nil { - log.Error().Err(err).Msg("error retrieving recommendation timestamp") - return nil, err - } - defer closeRows(impactedSinceRows) - - RuleKeyCreatedAt := make(map[string]types.Timestamp) - for impactedSinceRows.Next() { - var ruleFqdn string - var errorKey string - var oldTime time.Time - err := impactedSinceRows.Scan( - &ruleFqdn, - &errorKey, - &oldTime, - ) - if err != nil { - log.Error().Err(err).Msg("error scanning for rule id -> created_at map") - continue - } - newTime := types.Timestamp(oldTime.UTC().Format(time.RFC3339)) - RuleKeyCreatedAt[ruleFqdn+errorKey] = newTime - } - return RuleKeyCreatedAt, err -} - -// WriteReportForCluster writes result (health status) for selected cluster for given organization -func (storage DBStorage) WriteReportForCluster( - orgID types.OrgID, - clusterName types.ClusterName, - report types.ClusterReport, - rules []types.ReportItem, - lastCheckedTime time.Time, - gatheredAt time.Time, - storedAtTime time.Time, - _ types.RequestID, -) error { - // Skip writing the report if it isn't newer than a report - // that is already in the database for the same cluster. 
- if oldLastChecked, exists := storage.clustersLastChecked[clusterName]; exists && !lastCheckedTime.After(oldLastChecked) { - return types.ErrOldReport - } - - if storage.dbDriverType != types.DBDriverSQLite3 && storage.dbDriverType != types.DBDriverPostgres { - return fmt.Errorf("writing report with DB %v is not supported", storage.dbDriverType) - } - - // Begin a new transaction. - tx, err := storage.connection.Begin() - if err != nil { - return err - } - - err = func(tx *sql.Tx) error { - // Check if there is a more recent report for the cluster already in the database. - rows, err := tx.Query( - "SELECT last_checked_at FROM report WHERE org_id = $1 AND cluster = $2 AND last_checked_at > $3;", - orgID, clusterName, lastCheckedTime) - err = types.ConvertDBError(err, []interface{}{orgID, clusterName}) - if err != nil { - log.Error().Err(err).Msg("Unable to look up the most recent report in the database") - return err - } - - defer closeRows(rows) - - // If there is one, print a warning and discard the report (don't update it). 
- if rows.Next() { - log.Warn().Msgf("Database already contains report for organization %d and cluster name %s more recent than %v", - orgID, clusterName, lastCheckedTime) - return nil - } - - err = storage.updateReport(tx, orgID, clusterName, report, rules, lastCheckedTime, gatheredAt, storedAtTime) - if err != nil { - return err - } - - storage.clustersLastChecked[clusterName] = lastCheckedTime - metrics.WrittenReports.Inc() - - return nil - }(tx) - - finishTransaction(tx, err) - - return err -} - -// WriteRecommendationsForCluster writes hitting rules in received report for selected cluster -func (storage DBStorage) WriteRecommendationsForCluster( - orgID types.OrgID, - clusterName types.ClusterName, - stringReport types.ClusterReport, - creationTime types.Timestamp, -) (err error) { - var report types.ReportRules - err = json.Unmarshal([]byte(stringReport), &report) - if err != nil { - return err - } - tx, err := storage.connection.Begin() - if err != nil { - return err - } - - impactedSinceMap := make(map[string]ctypes.Timestamp) - err = func(tx *sql.Tx) error { - var deleted int64 - // Delete current recommendations for the cluster if some report has been previously stored for this cluster - if _, ok := storage.clustersLastChecked[clusterName]; ok { - // Get impacted_since if present - query := "SELECT rule_fqdn, error_key, impacted_since FROM recommendation WHERE org_id = $1 AND cluster_id = $2 LIMIT 1;" - impactedSinceMap, err = storage.getRuleKeyCreatedAtMap( - query, orgID, clusterName) - if err != nil { - log.Error().Err(err).Msgf("Unable to get recommendation impacted_since") - } - - // it is needed to use `org_id = $1` condition there - // because it allows DB to use proper btree indexing - // and not slow sequential scan - result, err := tx.Exec( - "DELETE FROM recommendation WHERE org_id = $1 AND cluster_id = $2;", orgID, clusterName) - err = types.ConvertDBError(err, []interface{}{clusterName}) - if err != nil { - log.Error().Err(err).Msgf("Unable 
to delete the existing recommendations for %s", clusterName) - return err - } - - // As the documentation says: - // RowsAffected returns the number of rows affected by an - // update, insert, or delete. Not every database or database - // driver may support this. - // So we might run in a scenario where we don't have metrics - // if the driver doesn't help. - deleted, err = result.RowsAffected() - if err != nil { - log.Error().Err(err).Msg("Unable to retrieve number of deleted rows with current driver") - return err - } - } - - inserted, err := storage.insertRecommendations(tx, orgID, clusterName, report, creationTime, impactedSinceMap) - if err != nil { - return err - } - - log.Info(). - Int64("Deleted", deleted). - Int("Inserted", inserted). - Int(organizationKey, int(orgID)). - Str(clusterKey, string(clusterName)). - Msg("Updated recommendation table") - // updateRecommendationsMetrics(string(clusterName), float64(deleted), float64(inserted)) - - return nil - }(tx) - - finishTransaction(tx, err) - - return err -} - -// finishTransaction finishes the transaction depending on err. 
err == nil -> commit, err != nil -> rollback -func finishTransaction(tx *sql.Tx, err error) { - if err != nil { - rollbackError := tx.Rollback() - if rollbackError != nil { - log.Err(rollbackError).Msgf("error when trying to rollback a transaction") - } - } else { - commitError := tx.Commit() - if commitError != nil { - log.Err(commitError).Msgf("error when trying to commit a transaction") - } - } -} - -// ReadRecommendationsForClusters reads all recommendations from recommendation table for given organization -func (storage DBStorage) ReadRecommendationsForClusters( - clusterList []string, - orgID types.OrgID, -) (ctypes.RecommendationImpactedClusters, error) { - impactedClusters := make(ctypes.RecommendationImpactedClusters, 0) - - if len(clusterList) < 1 { - return impactedClusters, nil - } - - // #nosec G201 - whereClause := fmt.Sprintf(`WHERE org_id = $1 AND cluster_id IN (%v)`, inClauseFromSlice(clusterList)) - - // disable "G202 (CWE-89): SQL string concatenation" - // #nosec G202 - query := ` - SELECT - rule_id, cluster_id - FROM - recommendation - ` + whereClause - - // #nosec G202 - rows, err := storage.connection.Query(query, orgID) - if err != nil { - log.Error().Err(err).Msg("query to get recommendations") - return impactedClusters, err - } - - for rows.Next() { - var ( - ruleID types.RuleID - clusterID types.ClusterName - ) - - err := rows.Scan( - &ruleID, - &clusterID, - ) - if err != nil { - log.Error().Err(err).Msg("read one recommendation") - return impactedClusters, err - } - - impactedClusters[ruleID] = append(impactedClusters[ruleID], clusterID) - } - - return impactedClusters, nil -} - -// ReadClusterListRecommendations retrieves cluster IDs and a list of hitting rules for each one -func (storage DBStorage) ReadClusterListRecommendations( - clusterList []string, - orgID types.OrgID, -) (ctypes.ClusterRecommendationMap, error) { - clusterMap := make(ctypes.ClusterRecommendationMap, 0) - - if len(clusterList) < 1 { - return clusterMap, nil - } - 
- // we have to select from report table primarily because we need to show last_checked_at even if there - // are no rule hits (which means there are no rows in recommendation table for that cluster) - - // disable "G202 (CWE-89): SQL string concatenation" - // #nosec G202 - query := ` - SELECT - rep.cluster, rep.last_checked_at, COALESCE(rec.rule_id, '') - FROM - report rep - LEFT JOIN - recommendation rec - ON - rep.org_id = rec.org_id AND - rep.cluster = rec.cluster_id - WHERE - rep.org_id = $1 AND rep.cluster IN (%v) - ` - // #nosec G201 - query = fmt.Sprintf(query, inClauseFromSlice(clusterList)) - - rows, err := storage.connection.Query(query, orgID) - if err != nil { - log.Error().Err(err).Msg("query to get recommendations") - return clusterMap, err - } - - for rows.Next() { - var ( - clusterID ctypes.ClusterName - ruleID ctypes.RuleID - timestampStr string - timestamp time.Time - ) - - if storage.dbDriverType != types.DBDriverSQLite3 { - // postgres is able to auto scan created_at into time.Time - err := rows.Scan( - &clusterID, - ×tamp, - &ruleID, - ) - if err != nil { - log.Error().Err(err).Msg("problem reading one recommendation") - return clusterMap, err - } - } else { - // sqlite cannot auto scan into time.Time, needs manual parse - err := rows.Scan( - &clusterID, - ×tampStr, - &ruleID, - ) - if err != nil { - log.Error().Err(err).Msg("problem reading one recommendation") - return clusterMap, err - } - - timestamp, err = time.Parse(time.RFC3339, timestampStr) - if err != nil { - log.Error().Err(err).Msgf("unparsable timestamp %v", timestamp) - return clusterMap, err - } - } - - if cluster, exists := clusterMap[clusterID]; exists { - cluster.Recommendations = append(cluster.Recommendations, ruleID) - clusterMap[clusterID] = cluster - } else { - // create entry in map for new cluster ID - clusterMap[clusterID] = ctypes.ClusterRecommendationList{ - // created at is the same for all rows for each cluster - CreatedAt: timestamp, - Recommendations: 
[]ctypes.RuleID{ruleID}, - } - } - } - - storage.fillInMetadata(orgID, clusterMap) - return clusterMap, nil -} - -// ReportsCount reads number of all records stored in database -func (storage DBStorage) ReportsCount() (int, error) { - count := -1 - err := storage.connection.QueryRow("SELECT count(*) FROM report;").Scan(&count) - err = types.ConvertDBError(err, nil) - - return count, err -} - -// DeleteReportsForOrg deletes all reports related to the specified organization from the storage. -func (storage DBStorage) DeleteReportsForOrg(orgID types.OrgID) error { - _, err := storage.connection.Exec("DELETE FROM report WHERE org_id = $1;", orgID) - return err -} - -// DeleteReportsForCluster deletes all reports related to the specified cluster from the storage. -func (storage DBStorage) DeleteReportsForCluster(clusterName types.ClusterName) error { - _, err := storage.connection.Exec("DELETE FROM report WHERE cluster = $1;", clusterName) - return err -} - -// GetConnection returns db connection(useful for testing) -func (storage DBStorage) GetConnection() *sql.DB { - return storage.connection -} - -// WriteConsumerError writes a report about a consumer error into the storage. 
-func (storage DBStorage) WriteConsumerError(msg *sarama.ConsumerMessage, consumerErr error) error { - _, err := storage.connection.Exec(` - INSERT INTO consumer_error (topic, partition, topic_offset, key, produced_at, consumed_at, message, error) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, - msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Timestamp, time.Now().UTC(), msg.Value, consumerErr.Error()) - - return err -} - -// GetDBDriverType returns db driver type -func (storage DBStorage) GetDBDriverType() types.DBDriver { - return storage.dbDriverType -} - -// DoesClusterExist checks if cluster with this id exists -func (storage DBStorage) DoesClusterExist(clusterID types.ClusterName) (bool, error) { - err := storage.connection.QueryRow( - "SELECT cluster FROM report WHERE cluster = $1", clusterID, - ).Scan(&clusterID) - if err == sql.ErrNoRows { - return false, nil - } else if err != nil { - return false, err - } - - return true, nil -} - -// ListOfDisabledClusters function returns list of all clusters disabled for a rule from a -// specified account. 
-func (storage DBStorage) ListOfDisabledClusters( - orgID types.OrgID, - ruleID types.RuleID, - errorKey types.ErrorKey, -) ( - disabledClusters []ctypes.DisabledClusterInfo, - err error, -) { - // select disabled rules from toggle table and the latest feedback from disable_feedback table - // LEFT join and COALESCE are used for the feedback, because feedback is filled by different - // request than toggle, so it might be empty/null - query := ` - SELECT - toggle.cluster_id, - toggle.disabled_at, - COALESCE(feedback.message, '') - FROM - cluster_rule_toggle toggle - LEFT JOIN - cluster_user_rule_disable_feedback feedback - ON feedback.updated_at = ( - SELECT updated_at - FROM cluster_user_rule_disable_feedback - WHERE cluster_id = toggle.cluster_id - AND org_id = $1 - AND rule_id = $2 - AND error_key = $3 - ORDER BY updated_at DESC - LIMIT 1 - ) - WHERE - toggle.org_id = $1 - AND toggle.rule_id = $2 - AND toggle.error_key = $3 - AND toggle.disabled = $4 - ORDER BY - toggle.disabled_at DESC - ` - - // run the query against database - rows, err := storage.connection.Query(query, orgID, ruleID, errorKey, RuleToggleDisable) - - // return empty list in case of any error - if err != nil { - return disabledClusters, err - } - defer closeRows(rows) - - for rows.Next() { - var disabledCluster ctypes.DisabledClusterInfo - - err = rows.Scan( - &disabledCluster.ClusterID, - &disabledCluster.DisabledAt, - &disabledCluster.Justification, - ) - - if err != nil { - log.Error().Err(err).Msg("ReadListOfDisabledRules") - // return partially filled slice + error - return disabledClusters, err - } - - // append disabled cluster read from database to a slice - disabledClusters = append(disabledClusters, disabledCluster) - } - - return disabledClusters, nil + GetDBSchema() migration.Schema + GetMaxVersion() migration.Version + MigrateToLatest() error } diff --git a/storage/storage_rules_test.go b/storage/storage_rules_test.go index 3cff80983..d4d0f980d 100644 --- 
a/storage/storage_rules_test.go +++ b/storage/storage_rules_test.go @@ -19,7 +19,6 @@ import ( "database/sql" "database/sql/driver" "fmt" - "os" "strings" "testing" "time" @@ -37,7 +36,7 @@ import ( "github.com/RedHatInsights/insights-results-aggregator/types" ) -func mustWriteReport3Rules(t *testing.T, mockStorage storage.Storage) { +func mustWriteReport3Rules(t *testing.T, mockStorage storage.OCPRecommendationsStorage) { err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, testdata.Report3Rules, testdata.Report3RulesParsed, testdata.LastCheckedAt, testdata.LastCheckedAt, time.Now(), testdata.RequestID1, @@ -45,7 +44,7 @@ func mustWriteReport3Rules(t *testing.T, mockStorage storage.Storage) { helpers.FailOnError(t, err) } -func mustWriteReport3RulesForCluster(t *testing.T, mockStorage storage.Storage, clusterName types.ClusterName) { +func mustWriteReport3RulesForCluster(t *testing.T, mockStorage storage.OCPRecommendationsStorage, clusterName types.ClusterName) { err := mockStorage.WriteReportForCluster( testdata.OrgID, clusterName, testdata.Report3Rules, testdata.Report3RulesParsed, testdata.LastCheckedAt, testdata.LastCheckedAt, time.Now(), testdata.RequestID1, @@ -58,7 +57,7 @@ func TestDBStorage_ToggleRuleForCluster(t *testing.T) { storage.RuleToggleDisable, storage.RuleToggleEnable, } { func(state storage.RuleToggle) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -74,7 +73,7 @@ func TestDBStorage_ToggleRuleForCluster(t *testing.T) { } func TestDBStorage_ToggleRuleForCluster_UnexpectedRuleToggleValue(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.ToggleRuleForCluster( @@ -84,7 +83,7 @@ func TestDBStorage_ToggleRuleForCluster_UnexpectedRuleToggleValue(t 
*testing.T) } func TestDBStorage_ToggleRuleForCluster_DBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() err := mockStorage.ToggleRuleForCluster( @@ -94,7 +93,7 @@ func TestDBStorage_ToggleRuleForCluster_DBError(t *testing.T) { } func TestDBStorageGetTogglesForRules_NoRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() _, err := mockStorage.GetTogglesForRules( @@ -104,7 +103,7 @@ func TestDBStorageGetTogglesForRules_NoRules(t *testing.T) { } func TestDBStorageGetTogglesForRules_AllRulesEnabled(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() _, err := mockStorage.GetTogglesForRules( @@ -114,7 +113,7 @@ func TestDBStorageGetTogglesForRules_AllRulesEnabled(t *testing.T) { } func TestDBStorageGetTogglesForRules_OneRuleDisabled(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() helpers.FailOnError(t, mockStorage.ToggleRuleForCluster( @@ -141,7 +140,7 @@ func TestDBStorageToggleRuleAndGet(t *testing.T) { storage.RuleToggleDisable, storage.RuleToggleEnable, } { func(state storage.RuleToggle) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -170,7 +169,7 @@ func TestDBStorageToggleRuleAndGet(t *testing.T) { // TestDBStorageListRulesReasonsOnDBError checks that no rules reasons are // returned for DB error. 
func TestDBStorageListRulesReasonsOnDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() @@ -182,7 +181,7 @@ func TestDBStorageListRulesReasonsOnDBError(t *testing.T) { // TestDBStorageListRulesReasonsEmptyDB checks that no rules reasons are // returned for empty DB. func TestDBStorageListRulesReasonsEmptyDB(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // try to read list of reasons @@ -196,7 +195,7 @@ func TestDBStorageListRulesReasonsEmptyDB(t *testing.T) { // TestDBStorageListOfRulesReasonsOneRule checks that one rule is returned // for non empty DB. func TestDBStorageListOfRulesReasonsOneRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // write some rules into database @@ -228,7 +227,7 @@ func TestDBStorageListOfRulesReasonsOneRule(t *testing.T) { // TestDBStorageListOfDisabledRulesDBError checks that no rules are returned // for DB error. func TestDBStorageListOfDisabledRulesDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) // close storage immediately closer() @@ -240,7 +239,7 @@ func TestDBStorageListOfDisabledRulesDBError(t *testing.T) { // TestDBStorageListOfDisabledRulesEmptyDB checks that no rules are returned // for empty DB. 
func TestDBStorageListOfDisabledRulesEmptyDB(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // try to read list of disabled rules @@ -254,7 +253,7 @@ func TestDBStorageListOfDisabledRulesEmptyDB(t *testing.T) { // TestDBStorageListOfDisabledRulesOneRule checks that one rule is returned // for non empty DB. func TestDBStorageListOfDisabledRulesOneRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // write some rules into database @@ -284,7 +283,7 @@ func TestDBStorageListOfDisabledRulesOneRule(t *testing.T) { // TestDBStorageListOfDisabledRulesTwoRules checks that two rules are returned // for non empty DB. func TestDBStorageListOfDisabledRulesTwoRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // write some rules into database @@ -320,7 +319,7 @@ func TestDBStorageListOfDisabledRulesTwoRules(t *testing.T) { // TestDBStorageListOfDisabledRulesNoRule checks that no rule is returned // for non empty DB when all rules are enabled. 
func TestDBStorageListOfDisabledRulesNoRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() // write some rules into database @@ -345,7 +344,7 @@ func TestDBStorageVoteOnRule(t *testing.T) { types.UserVoteDislike, types.UserVoteLike, types.UserVoteNone, } { func(vote types.UserVote) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -373,7 +372,7 @@ func TestDBStorageVoteOnRule_NoCluster(t *testing.T) { types.UserVoteDislike, types.UserVoteLike, types.UserVoteNone, } { func(vote types.UserVote) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() err := mockStorage.VoteOnRule( @@ -386,7 +385,7 @@ func TestDBStorageVoteOnRule_NoCluster(t *testing.T) { } func TestDBStorageChangeVote(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -415,7 +414,7 @@ func TestDBStorageChangeVote(t *testing.T) { } func TestDBStorageTextFeedback(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -437,7 +436,7 @@ func TestDBStorageTextFeedback(t *testing.T) { } func TestDBStorageFeedbackChangeMessage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -465,7 +464,7 @@ func TestDBStorageFeedbackChangeMessage(t *testing.T) { } func TestDBStorageFeedbackErrorItemNotFound(t *testing.T) { 
- mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() _, err := mockStorage.GetUserFeedbackOnRule(testdata.ClusterName, testdata.Rule1ID, testdata.ErrorKey1, testdata.UserID) @@ -475,7 +474,7 @@ func TestDBStorageFeedbackErrorItemNotFound(t *testing.T) { } func TestDBStorageFeedbackErrorDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() _, err := mockStorage.GetUserFeedbackOnRule(testdata.ClusterName, testdata.Rule1ID, testdata.ErrorKey1, testdata.UserID) @@ -483,7 +482,7 @@ func TestDBStorageFeedbackErrorDBError(t *testing.T) { } func TestDBStorageVoteOnRuleDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() err := mockStorage.VoteOnRule(testdata.ClusterName, testdata.Rule1ID, testdata.ErrorKey1, testdata.OrgID, testdata.UserID, types.UserVoteNone, "") @@ -491,13 +490,13 @@ func TestDBStorageVoteOnRuleDBError(t *testing.T) { } func TestDBStorageVoteOnRuleDBExecError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, false) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, false) defer closer() - connection := storage.GetConnection(mockStorage.(*storage.DBStorage)) + connection := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) query := ` CREATE TABLE cluster_rule_user_feedback ( - cluster_id INTEGER NOT NULL CHECK(typeof(cluster_id) = 'integer'), + cluster_id INTEGER NOT NULL, rule_id INTEGER NOT NULL, org_id VARCHAR NOT NULL, user_id INTEGER NOT NULL, @@ -511,34 +510,15 @@ func TestDBStorageVoteOnRuleDBExecError(t *testing.T) { ) ` - if os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB") == "postgres" { - query = ` - CREATE TABLE cluster_rule_user_feedback ( - cluster_id INTEGER NOT 
NULL, - rule_id INTEGER NOT NULL, - org_id VARCHAR NOT NULL, - user_id INTEGER NOT NULL, - message INTEGER NOT NULL, - user_vote INTEGER NOT NULL, - added_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - error_key VARCHAR NOT NULL, - - PRIMARY KEY(cluster_id, rule_id, user_id, error_key) - ) - ` - } - // create a table with a bad type _, err := connection.Exec(query) helpers.FailOnError(t, err) err = mockStorage.VoteOnRule("non int", testdata.Rule1ID, testdata.ErrorKey1, testdata.OrgID, testdata.UserID, types.UserVoteNone, "") assert.Error(t, err) - const sqliteErrMessage = "CHECK constraint failed: cluster_rule_user_feedback" - const postgresErrMessage = "pq: invalid input syntax for integer: \"non int\"" - if err.Error() != sqliteErrMessage && !strings.HasPrefix(err.Error(), postgresErrMessage) { - t.Fatalf("expected one of: \n%v\n%v\ngot:\n%v", sqliteErrMessage, postgresErrMessage, err.Error()) + const postgresErrMessage = "pq: invalid input syntax for type integer: \"non int\"" + if !strings.HasPrefix(err.Error(), postgresErrMessage) { + t.Fatalf("expected : \n%v\ngot:\n%v", postgresErrMessage, err.Error()) } } @@ -570,7 +550,7 @@ func TestDBStorageVoteOnRuleDBCloseError(t *testing.T) { } func TestDBStorageGetVotesForNoRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() feedbacks, err := mockStorage.GetUserFeedbackOnRules( @@ -582,7 +562,7 @@ func TestDBStorageGetVotesForNoRules(t *testing.T) { } func TestDBStorageGetDisableFeedback(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() feedbacks, err := mockStorage.GetUserDisableFeedbackOnRules( @@ -594,7 +574,7 @@ func TestDBStorageGetDisableFeedback(t *testing.T) { } func TestDBStorageGetVotes(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + 
mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -619,7 +599,7 @@ func TestDBStorageGetVotes(t *testing.T) { } func TestDBStorageTextDisableFeedback(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -642,7 +622,7 @@ func TestDBStorageTextDisableFeedback(t *testing.T) { } func TestDBStorageDisableFeedbackChangeMessage(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() mustWriteReport3Rules(t, mockStorage) @@ -670,7 +650,7 @@ func TestDBStorageDisableFeedbackChangeMessage(t *testing.T) { } func TestDBStorageDisableFeedbackErrorItemNotFound(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() _, err := mockStorage.GetUserFeedbackOnRuleDisable(testdata.ClusterName, testdata.Rule1ID, testdata.ErrorKey1, testdata.UserID) @@ -680,7 +660,7 @@ func TestDBStorageDisableFeedbackErrorItemNotFound(t *testing.T) { } func TestDBStorageDisableFeedbackErrorDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() _, err := mockStorage.GetUserFeedbackOnRuleDisable(testdata.ClusterName, testdata.Rule1ID, testdata.ErrorKey1, testdata.UserID) @@ -690,7 +670,7 @@ func TestDBStorageDisableFeedbackErrorDBError(t *testing.T) { // TestDBStorageListClustersForHittingRules checks the list of HittingClustersData // objects retrieved when ListOfClustersForOrgSpecificRule is called func TestDBStorageListClustersForHittingRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := 
ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusterIds := []ctypes.ClusterName{ @@ -766,12 +746,16 @@ func TestDBStorageListClustersForHittingRules(t *testing.T) { // but only clusters for testdata.OrgID are returned, and since clusterIds[0] is // not active, an empty list of hitting clusters should be returned as well as an // ItemNotFoundError - list, err = mockStorage.ListOfClustersForOrgSpecificRule(testdata.OrgID, types.RuleSelector(testdata.Rule1CompositeID), []string{string(clusterIds[1]), string(clusterIds[2])}) + list, err = mockStorage.ListOfClustersForOrgSpecificRule(testdata.OrgID, types.RuleSelector(testdata.Rule1CompositeID), []string{ + string(clusterIds[1]), string(clusterIds[2]), + }) assert.Error(t, err) assert.IsType(t, &utypes.ItemNotFoundError{}, err) assert.Equal(t, []ctypes.HittingClustersData{}, list) - list, err = mockStorage.ListOfClustersForOrgSpecificRule(testdata.Org2ID, types.RuleSelector(testdata.Rule1CompositeID), []string{string(clusterIds[0]), string(clusterIds[1])}) + list, err = mockStorage.ListOfClustersForOrgSpecificRule(testdata.Org2ID, types.RuleSelector(testdata.Rule1CompositeID), []string{ + string(clusterIds[0]), string(clusterIds[1]), + }) assert.Error(t, err) assert.IsType(t, &utypes.ItemNotFoundError{}, err) assert.Equal(t, []ctypes.HittingClustersData{}, list) @@ -781,7 +765,7 @@ func TestDBStorageListClustersForHittingRules(t *testing.T) { // list of HittingClustersData objects is returned when the given org ID // has no associated entries in the recommendation table func TestDBStorageListClustersForHittingRulesOrgNotFound(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() helpers.FailOnError(t, mockStorage.WriteRecommendationsForCluster( @@ -800,7 +784,7 @@ func TestDBStorageListClustersForHittingRulesOrgNotFound(t *testing.T) { // ItemNotFoundError, independently of if a list of active 
clusters is passed // as the SQL query filter. func TestDBStorageListClustersForHittingRulesRuleNotFound(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() helpers.FailOnError(t, mockStorage.WriteRecommendationsForCluster( @@ -824,7 +808,7 @@ func TestDBStorageListClustersForHittingRulesRuleNotFound(t *testing.T) { // hitting recommendations returns no rows (Any other DB error will // be indicated to client as a 503). func TestDBStorageListClustersForHittingRulesNoRowsFound(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() list, err := mockStorage.ListOfClustersForOrgSpecificRule(testdata.OrgID, types.RuleSelector(testdata.Rule3CompositeID), nil) @@ -841,7 +825,7 @@ func TestDBStorageListClustersForHittingRulesNoRowsFound(t *testing.T) { // is given, which changes the query made to the DB, but not the expected // behavior. 
func TestDBStorageListFilteredClustersForHittingRulesNoRowsFound(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() list, err := mockStorage.ListOfClustersForOrgSpecificRule(testdata.OrgID, types.RuleSelector(testdata.Rule3CompositeID), []string{string(testdata.ClusterName)}) @@ -852,7 +836,7 @@ func TestDBStorageListFilteredClustersForHittingRulesNoRowsFound(t *testing.T) { // TestDBStorageListOfDisabledClustersOneRule checks that one cluster is returned func TestDBStorageListOfDisabledClustersOneRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 2) @@ -882,7 +866,7 @@ func TestDBStorageListOfDisabledClustersOneRule(t *testing.T) { // TestDBStorageListOfDisabledClustersTwoClusters checks that two specific clusters are returned func TestDBStorageListOfDisabledClustersTwoClusters(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 3) @@ -920,7 +904,7 @@ func TestDBStorageListOfDisabledClustersTwoClusters(t *testing.T) { // TestDBStorageListOfDisabledClustersDifferentRule checks that no cluster is returned // when a different rule is disabled. 
func TestDBStorageListOfDisabledClustersDifferentRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 2) @@ -948,7 +932,7 @@ func TestDBStorageListOfDisabledClustersDifferentRule(t *testing.T) { // TestDBStorageListOfDisabledClustersFeedback tests that the disable feedback is properly returned func TestDBStorageListOfDisabledClustersFeedback(t *testing.T) { const feedback = "feedback test" - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 2) @@ -985,7 +969,7 @@ func TestDBStorageListOfDisabledClustersFeedback(t *testing.T) { func TestDBStorageListOfDisabledClustersFeedbackUpdate(t *testing.T) { const oldFeedback = "feedback old" const newFeedback = "feedback new" - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]types.ClusterName, 2) @@ -1027,7 +1011,7 @@ func TestDBStorageListOfDisabledClustersFeedbackUpdate(t *testing.T) { // TestDBStorageListOfDisabledRulesForClustersDBError checks for DB error. func TestDBStorageListOfDisabledRulesForClustersDBError(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) closer() clusters := make([]string, 3) @@ -1043,7 +1027,7 @@ func TestDBStorageListOfDisabledRulesForClustersDBError(t *testing.T) { // TestDBStorageListOfDisabledRulesForClustersEmptyDB checks that no rules are returned // for empty DB. 
func TestDBStorageListOfDisabledRulesForClustersEmptyDB(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]string, 3) @@ -1062,7 +1046,7 @@ func TestDBStorageListOfDisabledRulesForClustersEmptyDB(t *testing.T) { // TestDBStorageListOfDisabledRulesForClustersOneRule checks that one rule is returned // for non empty DB and selected cluster func TestDBStorageListOfDisabledRulesForClustersOneRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]string, 3) @@ -1099,7 +1083,7 @@ func TestDBStorageListOfDisabledRulesForClustersOneRule(t *testing.T) { // TestDBStorageListOfDisabledRulesForClustersTwoRules checks that two rules are returned // for non empty DB. func TestDBStorageListOfDisabledRulesForClustersTwoRules(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]string, 3) @@ -1133,7 +1117,7 @@ func TestDBStorageListOfDisabledRulesForClustersTwoRules(t *testing.T) { // TestDBStorageListOfDisabledRulesForClustersNoRule checks that no rule is returned // for cluster that wasnt requested func TestDBStorageListOfDisabledRulesForClustersNoRule(t *testing.T) { - mockStorage, closer := ira_helpers.MustGetMockStorage(t, true) + mockStorage, closer := ira_helpers.MustGetPostgresStorage(t, true) defer closer() clusters := make([]string, 3) diff --git a/storage/storage_write_recommendations_benchmark_test.go b/storage/storage_write_recommendations_benchmark_test.go index 48e36cee9..9062992b7 100644 --- a/storage/storage_write_recommendations_benchmark_test.go +++ b/storage/storage_write_recommendations_benchmark_test.go @@ -296,12 +296,12 @@ func stopBenchmarkTimer(b *testing.B) { } // 
Only create recommendation table in the test DB -func mustPrepareRecommendationsBenchmark(b *testing.B) (storage.Storage, *sql.DB, func()) { +func mustPrepareRecommendationsBenchmark(b *testing.B) (storage.OCPRecommendationsStorage, *sql.DB, func()) { // Postgres queries are very verbose at DEBUG log level, so it's better // to silence them this way to make benchmark results easier to find. zerolog.SetGlobalLevel(zerolog.WarnLevel) mockStorage, closer := ira_helpers.MustGetPostgresStorage(b, false) - conn := storage.GetConnection(mockStorage.(*storage.DBStorage)) + conn := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) _, err := conn.Exec("DROP TABLE IF EXISTS recommendation;") helpers.FailOnError(b, err) @@ -314,7 +314,7 @@ func mustPrepareRecommendationsBenchmark(b *testing.B) (storage.Storage, *sql.DB // Only create recommendation table in the test DB, and insert numRows entries // in the table before the benchmarking timers are reset -func mustPrepareRecommendationsBenchmarkWithEntries(b *testing.B, numRows int) (storage.Storage, *sql.DB, func()) { +func mustPrepareRecommendationsBenchmarkWithEntries(b *testing.B, numRows int) (storage.OCPRecommendationsStorage, *sql.DB, func()) { mockStorage, conn, closer := mustPrepareReportAndRecommendationsBenchmark(b) for i := 0; i < numRows; i++ { @@ -325,19 +325,19 @@ func mustPrepareRecommendationsBenchmarkWithEntries(b *testing.B, numRows int) ( ) VALUES ($1, $2, $3) `, cluster, "a rule module", "an error key") helpers.FailOnError(b, err) - storage.SetClustersLastChecked(mockStorage.(*storage.DBStorage), types.ClusterName(cluster), time.Now()) + storage.SetClustersLastChecked(mockStorage.(*storage.OCPRecommendationsDBStorage), types.ClusterName(cluster), time.Now()) } return mockStorage, conn, closer } -func mustPrepareReportAndRecommendationsBenchmark(b *testing.B) (storage.Storage, *sql.DB, func()) { +func mustPrepareReportAndRecommendationsBenchmark(b *testing.B) 
(storage.OCPRecommendationsStorage, *sql.DB, func()) { // Postgres queries are very verbose at DEBUG log level, so it's better // to silence them this way to make benchmark results easier to find. zerolog.SetGlobalLevel(zerolog.WarnLevel) mockStorage, closer := ira_helpers.MustGetPostgresStorage(b, false) - conn := storage.GetConnection(mockStorage.(*storage.DBStorage)) + conn := storage.GetConnection(mockStorage.(*storage.OCPRecommendationsDBStorage)) _, err := conn.Exec("DROP TABLE IF EXISTS recommendation;") helpers.FailOnError(b, err) @@ -410,7 +410,7 @@ func BenchmarkNewRecommendationsExistingClusterConflict(b *testing.B) { id := uuid.New().String() if !clusterIDSet.contains(id) { clusterIDSet.add(id) - storage.SetClustersLastChecked(mockStorage.(*storage.DBStorage), types.ClusterName(id), time.Now()) + storage.SetClustersLastChecked(mockStorage.(*storage.OCPRecommendationsDBStorage), types.ClusterName(id), time.Now()) } } clusterIds := make([]string, 2*len(clusterIDSet.content)) @@ -451,7 +451,7 @@ func BenchmarkNewRecommendations2000initialEntries(b *testing.B) { id := uuid.New().String() if !clusterIDSet.contains(id) { clusterIDSet.add(id) - storage.SetClustersLastChecked(mockStorage.(*storage.DBStorage), types.ClusterName(id), time.Now()) + storage.SetClustersLastChecked(mockStorage.(*storage.OCPRecommendationsDBStorage), types.ClusterName(id), time.Now()) } } diff --git a/test.sh b/test.sh index 100140970..acb06d4c2 100755 --- a/test.sh +++ b/test.sh @@ -110,6 +110,17 @@ function populate_db_with_mock_data() { fi } +function check_composer() { + if command -v docker-compose > /dev/null; then + COMPOSER=docker-compose + elif command -v podman-compose > /dev/null; then + COMPOSER=podman-compose + else + echo "Please, install docker-compose or podman-compose to run this tests" + exit 1 + fi +} + function start_service() { if [ "$NO_SERVICE" = true ]; then echo "Not starting service" @@ -120,6 +131,8 @@ function start_service() { # TODO: stop parent(this 
script) if service died INSIGHTS_RESULTS_AGGREGATOR__LOGGING__LOG_LEVEL=$LOG_LEVEL \ INSIGHTS_RESULTS_AGGREGATOR_CONFIG_FILE=./tests/tests \ + INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB=aggregator \ + INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS=postgres \ ./insights-results-aggregator || echo -e "${COLORS_RED}service exited with error${COLORS_RESET}" & # shellcheck disable=2181 @@ -161,8 +174,22 @@ function test_rest_api() { return $EXIT_CODE } +function wait_for_postgres() { + until psql "dbname=aggregator user=postgres password=postgres host=localhost sslmode=disable" -c '\q' > /dev/null 2>&1; do + sleep 2 + done +} + + echo -e "------------------------------------------------------------------------------------------------" +if [ -z "$CI" ]; then + echo "Running postgres container locally" + check_composer + $COMPOSER up -d > /dev/null + wait_for_postgres +fi + case $1 in rest_api) test_rest_api @@ -179,6 +206,11 @@ rest_api) ;; esac +if [ -z "$CI" ]; then + echo "Stopping postgres container" + $COMPOSER down > /dev/null +fi + echo -e "------------------------------------------------------------------------------------------------" exit $EXIT_VALUE diff --git a/tests/config1.toml b/tests/config1.toml index 59238c9ff..b41d2d5a1 100644 --- a/tests/config1.toml +++ b/tests/config1.toml @@ -1,5 +1,5 @@ [broker] -address = "localhost:29092" +addresses = "localhost:29092" topic = "platform.results.ccx" group = "aggregator" enabled = false @@ -20,9 +20,8 @@ org_overview_limit_hours = 2 [processing] org_allowlist_file = "org_allowlist.csv" -[storage] -db_driver = "sqlite3" -sqlite_datasource = ":memory:" +[ocp_recommendations_storage] +db_driver = "postgres" pg_username = "user" pg_password = "password" pg_host = "localhost" @@ -31,6 +30,19 @@ pg_db_name = "aggregator" pg_params = "" type = "sql" +[dvo_recommendations_storage] +db_driver = "postgres" +pg_username = "user" +pg_password = "password" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" 
+pg_params = "" +type = "sql" + +[storage_backend] +use = "" + [redis] database = 0 endpoint = "localhost:6379" @@ -49,3 +61,7 @@ logging_to_cloud_watch_enabled = false [metrics] namespace = "aggregator" + +[sentry] +dsn = "test.example.com" +environment = "test" diff --git a/tests/helpers/http.go b/tests/helpers/http.go index aafc8562b..498923c8c 100644 --- a/tests/helpers/http.go +++ b/tests/helpers/http.go @@ -67,6 +67,7 @@ var DefaultServerConfigAuth = server.Configuration{ APISpecFile: "openapi.json", Debug: true, Auth: true, + AuthType: "xrh", MaximumFeedbackMessageLength: 255, OrgOverviewLimitHours: 2, } @@ -77,21 +78,21 @@ var DefaultServerConfigAuth = server.Configuration{ // sends api request and checks api response (see docs for APIRequest and APIResponse) func AssertAPIRequest( t testing.TB, - mockStorage storage.Storage, + mockStorage storage.OCPRecommendationsStorage, serverConfig *server.Configuration, request *helpers.APIRequest, expectedResponse *helpers.APIResponse, ) { if mockStorage == nil { var closer func() - mockStorage, closer = MustGetMockStorage(t, true) + mockStorage, closer = MustGetPostgresStorage(t, true) defer closer() } if serverConfig == nil { serverConfig = &DefaultServerConfig } - testServer := server.New(*serverConfig, mockStorage) + testServer := server.New(*serverConfig, mockStorage, nil) helpers.AssertAPIRequest(t, testServer, serverConfig.APIPrefix, request, expectedResponse) } diff --git a/tests/helpers/mock_consumer.go b/tests/helpers/mock_consumer.go index 8d33c1376..508926780 100644 --- a/tests/helpers/mock_consumer.go +++ b/tests/helpers/mock_consumer.go @@ -53,16 +53,16 @@ func (mockKafkaConsumer *MockKafkaConsumer) Close(t testing.TB) { helpers.FailOnError(t, err) } -// MustGetMockKafkaConsumerWithExpectedMessages creates mocked kafka consumer -// which produces list of messages automatically +// MustGetMockOCPRulesConsumerWithExpectedMessages creates mocked OCP rules +// consumer which produces list of messages 
automatically // calls t.Fatal on error -func MustGetMockKafkaConsumerWithExpectedMessages( +func MustGetMockOCPRulesConsumerWithExpectedMessages( t testing.TB, topic string, orgAllowlist mapset.Set, messages []string, ) (*MockKafkaConsumer, func()) { - mockConsumer, closer, err := GetMockKafkaConsumerWithExpectedMessages(t, topic, orgAllowlist, messages) + mockConsumer, closer, err := GetMockOCPRulesConsumerWithExpectedMessages(t, topic, orgAllowlist, messages) if err != nil { t.Fatal(err) } @@ -70,23 +70,69 @@ func MustGetMockKafkaConsumerWithExpectedMessages( return mockConsumer, closer } -// GetMockKafkaConsumerWithExpectedMessages creates mocked kafka consumer -// which produces list of messages automatically -func GetMockKafkaConsumerWithExpectedMessages( +// MustGetMockDVOConsumerWithExpectedMessages same as MustGetMockOCPRulesConsumerWithExpectedMessages +// but for DVO +func MustGetMockDVOConsumerWithExpectedMessages( + t testing.TB, + topic string, + orgAllowlist mapset.Set, + messages []string, +) (*MockKafkaConsumer, func()) { + mockConsumer, closer, err := GetMockDVOConsumerWithExpectedMessages(t, topic, orgAllowlist, messages) + if err != nil { + t.Fatal(err) + } + + return mockConsumer, closer +} + +// GetMockOCPRulesConsumerWithExpectedMessages creates mocked OCP rules +// consumer which produces list of messages automatically +func GetMockOCPRulesConsumerWithExpectedMessages( + t testing.TB, topic string, orgAllowlist mapset.Set, messages []string, +) (*MockKafkaConsumer, func(), error) { + mockStorage, storageCloser := MustGetPostgresStorage(t, true) + + mockConsumer := &MockKafkaConsumer{ + KafkaConsumer: consumer.KafkaConsumer{ + Configuration: broker.Configuration{ + Addresses: "", + Topic: topic, + Group: "", + Enabled: true, + OrgAllowlist: orgAllowlist, + }, + Storage: mockStorage, + MessageProcessor: consumer.OCPRulesProcessor{}, + }, + topic: topic, + messages: messages, + } + + return mockConsumer, func() { + storageCloser() + 
mockConsumer.Close(t) + }, nil +} + +// GetMockDVOConsumerWithExpectedMessages same as GetMockOCPRulesConsumerWithExpectedMessages +// but for DVO +func GetMockDVOConsumerWithExpectedMessages( t testing.TB, topic string, orgAllowlist mapset.Set, messages []string, ) (*MockKafkaConsumer, func(), error) { - mockStorage, storageCloser := MustGetMockStorage(t, true) + mockStorage, storageCloser := MustGetPostgresStorageDVO(t, true) mockConsumer := &MockKafkaConsumer{ KafkaConsumer: consumer.KafkaConsumer{ Configuration: broker.Configuration{ - Address: "", + Addresses: "", Topic: topic, Group: "", Enabled: true, OrgAllowlist: orgAllowlist, }, - Storage: mockStorage, + Storage: mockStorage, + MessageProcessor: consumer.DVORulesProcessor{}, }, topic: topic, messages: messages, diff --git a/tests/helpers/mock_storage.go b/tests/helpers/mock_storage.go index 6faf96dbb..cb31890a2 100644 --- a/tests/helpers/mock_storage.go +++ b/tests/helpers/mock_storage.go @@ -26,46 +26,50 @@ import ( "github.com/google/uuid" "github.com/RedHatInsights/insights-results-aggregator/conf" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/storage" "github.com/RedHatInsights/insights-results-aggregator/types" ) -const sqlite3 = "sqlite3" -const postgres = "postgres" - -// MustGetMockStorage creates mocked storage based on in-memory Sqlite instance by default -// or on postgresql with config taken from config-devel.toml -// if env variable INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB is set to "postgres" -// INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS is set to db admin's password -// produces t.Fatal(err) on error -func MustGetMockStorage(tb testing.TB, init bool) (storage.Storage, func()) { - return MustGetPostgresStorage(tb, init) -} +const ( + postgres = "postgres" + unfulfilledExpectations = "there were unfulfilled expectations: %s" +) // MustGetMockStorageWithExpects returns mock db storage // with a driver 
"github.com/DATA-DOG/go-sqlmock" which requires you to write expect -// before each query, so first try to use MustGetMockStorage +// before each query, so first try to use MustGetPostgresStorage // don't forget to call MustCloseMockStorageWithExpects -func MustGetMockStorageWithExpects(t *testing.T) (storage.Storage, sqlmock.Sqlmock) { +func MustGetMockStorageWithExpects(t *testing.T) (storage.OCPRecommendationsStorage, sqlmock.Sqlmock) { return MustGetMockStorageWithExpectsForDriver(t, types.DBDriverGeneral) } // MustGetMockStorageWithExpectsForDriver returns mock db storage // with specified driver type and // with a driver "github.com/DATA-DOG/go-sqlmock" which requires you to write expect -// before each query, so first try to use MustGetMockStorage +// before each query, so first try to use MustGetPostgresStorage // don't forget to call MustCloseMockStorageWithExpects func MustGetMockStorageWithExpectsForDriver( t *testing.T, driverType types.DBDriver, -) (storage.Storage, sqlmock.Sqlmock) { +) (storage.OCPRecommendationsStorage, sqlmock.Sqlmock) { + db, expects := MustGetMockDBWithExpects(t) + + return storage.NewOCPRecommendationsFromConnection(db, driverType), expects +} + +// MustGetMockStorageWithExpectsForDriverDVO same as MustGetMockStorageWithExpectsForDriver +// but for DVO +func MustGetMockStorageWithExpectsForDriverDVO( + t *testing.T, driverType types.DBDriver, +) (storage.DVORecommendationsStorage, sqlmock.Sqlmock) { db, expects := MustGetMockDBWithExpects(t) - return storage.NewFromConnection(db, driverType), expects + return storage.NewDVORecommendationsFromConnection(db, driverType), expects } // MustGetMockDBWithExpects returns mock db // with a driver "github.com/DATA-DOG/go-sqlmock" which requires you to write expect -// before each query, so first try to use MustGetMockStorage +// before each query, so first try to use MustGetPostgresStorage // don't forget to call MustCloseMockDBWithExpects func MustGetMockDBWithExpects(t *testing.T) 
(*sql.DB, sqlmock.Sqlmock) { db, expects, err := sqlmock.New() @@ -76,10 +80,22 @@ func MustGetMockDBWithExpects(t *testing.T) (*sql.DB, sqlmock.Sqlmock) { // MustCloseMockStorageWithExpects closes mock storage with expects and panics if it wasn't successful func MustCloseMockStorageWithExpects( - t *testing.T, mockStorage storage.Storage, expects sqlmock.Sqlmock, + t *testing.T, mockStorage storage.OCPRecommendationsStorage, expects sqlmock.Sqlmock, +) { + if err := expects.ExpectationsWereMet(); err != nil { + t.Errorf(unfulfilledExpectations, err) + } + + expects.ExpectClose() + helpers.FailOnError(t, mockStorage.Close()) +} + +// MustCloseMockStorageWithExpectsDVO same asMustCloseMockStorageWithExpects +func MustCloseMockStorageWithExpectsDVO( + t *testing.T, mockStorage storage.DVORecommendationsStorage, expects sqlmock.Sqlmock, ) { if err := expects.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) + t.Errorf(unfulfilledExpectations, err) } expects.ExpectClose() @@ -91,64 +107,70 @@ func MustCloseMockDBWithExpects( t *testing.T, db *sql.DB, expects sqlmock.Sqlmock, ) { if err := expects.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) + t.Errorf(unfulfilledExpectations, err) } expects.ExpectClose() helpers.FailOnError(t, db.Close()) } -// MustGetSQLiteMemoryStorage creates test sqlite storage in file -func MustGetSQLiteMemoryStorage(tb testing.TB, init bool) (storage.Storage, func()) { - sqliteStorage := mustGetSqliteStorage(tb, ":memory:", init) - - return sqliteStorage, func() { - MustCloseStorage(tb, sqliteStorage) - } -} +// MustGetPostgresStorage creates test postgres storage with credentials from config-devel +func MustGetPostgresStorage(tb testing.TB, init bool) (storage.OCPRecommendationsStorage, func()) { + dbAdminPassword := os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS") -// MustGetSQLiteFileStorage creates test sqlite storage in file -func 
MustGetSQLiteFileStorage(tb testing.TB, init bool) (storage.Storage, func()) { - dbFilename := fmt.Sprintf("/tmp/insights-results-aggregator.test.%v.db", uuid.New().String()) + err := conf.LoadConfiguration("../config-devel") + helpers.FailOnError(tb, err) - sqliteStorage := mustGetSqliteStorage(tb, dbFilename, init) + // force postgres and replace db name with test one + storageConf := &conf.Config.OCPRecommendationsStorage + storageConf.Driver = postgres + storageConf.PGDBName += "_test_db_" + strings.ReplaceAll(uuid.New().String(), "-", "_") + storageConf.PGPassword = dbAdminPassword - return sqliteStorage, func() { - MustCloseStorage(tb, sqliteStorage) - helpers.FailOnError(tb, os.Remove(dbFilename)) - } -} + connString := fmt.Sprintf( + "host=%s port=%d user=%s password=%s sslmode=disable", + storageConf.PGHost, storageConf.PGPort, storageConf.PGUsername, storageConf.PGPassword, + ) -func mustGetSqliteStorage(tb testing.TB, datasource string, init bool) storage.Storage { - db, err := sql.Open(sqlite3, datasource) + adminConn, err := sql.Open(storageConf.Driver, connString) helpers.FailOnError(tb, err) - _, err = db.Exec("PRAGMA foreign_keys = ON;") + query := "CREATE DATABASE " + storageConf.PGDBName + ";" + _, err = adminConn.Exec(query) helpers.FailOnError(tb, err) - sqliteStorage := storage.NewFromConnection(db, types.DBDriverSQLite3) + postgresStorage, err := storage.NewOCPRecommendationsStorage(*storageConf) + + helpers.FailOnError(tb, err) + helpers.FailOnError(tb, postgresStorage.GetConnection().Ping()) if init { - helpers.FailOnError(tb, sqliteStorage.MigrateToLatest()) - helpers.FailOnError(tb, sqliteStorage.Init()) + helpers.FailOnError(tb, postgresStorage.MigrateToLatest()) + helpers.FailOnError(tb, postgresStorage.Init()) } - return sqliteStorage + return postgresStorage, func() { + MustCloseStorage(tb, postgresStorage) + + _, err := adminConn.Exec("DROP DATABASE " + conf.Config.OCPRecommendationsStorage.PGDBName) + helpers.FailOnError(tb, err) + + 
helpers.FailOnError(tb, adminConn.Close()) + } } -// MustGetPostgresStorage creates test postgres storage with credentials from config-devel -func MustGetPostgresStorage(tb testing.TB, init bool) (storage.Storage, func()) { +// MustGetPostgresStorageDVO creates test postgres storage with credentials from config-devel for DVO storage +func MustGetPostgresStorageDVO(tb testing.TB, init bool) (storage.DVORecommendationsStorage, func()) { dbAdminPassword := os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS") err := conf.LoadConfiguration("../config-devel") helpers.FailOnError(tb, err) // force postgres and replace db name with test one - storageConf := &conf.Config.Storage + storageConf := &conf.Config.DVORecommendationsStorage storageConf.Driver = postgres storageConf.PGDBName += "_test_db_" + strings.ReplaceAll(uuid.New().String(), "-", "_") storageConf.PGPassword = dbAdminPassword - storageConf.PGUsername = postgres connString := fmt.Sprintf( "host=%s port=%d user=%s password=%s sslmode=disable", @@ -162,10 +184,11 @@ func MustGetPostgresStorage(tb testing.TB, init bool) (storage.Storage, func()) _, err = adminConn.Exec(query) helpers.FailOnError(tb, err) - postgresStorage, err := storage.New(*storageConf) + postgresStorage, err := storage.NewDVORecommendationsStorage(*storageConf) helpers.FailOnError(tb, err) helpers.FailOnError(tb, postgresStorage.GetConnection().Ping()) + helpers.FailOnError(tb, migration.InitDBSchema(postgresStorage.GetConnection(), postgresStorage.GetDBSchema())) if init { helpers.FailOnError(tb, postgresStorage.MigrateToLatest()) @@ -175,7 +198,7 @@ func MustGetPostgresStorage(tb testing.TB, init bool) (storage.Storage, func()) return postgresStorage, func() { MustCloseStorage(tb, postgresStorage) - _, err := adminConn.Exec("DROP DATABASE " + conf.Config.Storage.PGDBName) + _, err := adminConn.Exec("DROP DATABASE " + conf.Config.DVORecommendationsStorage.PGDBName) helpers.FailOnError(tb, err) helpers.FailOnError(tb, 
adminConn.Close()) @@ -186,3 +209,39 @@ func MustGetPostgresStorage(tb testing.TB, init bool) (storage.Storage, func()) func MustCloseStorage(tb testing.TB, s storage.Storage) { helpers.FailOnError(tb, s.Close()) } + +// PrepareDB prepares mock OCPRecommendationsDBStorage +func PrepareDB(t *testing.T) (*storage.OCPRecommendationsDBStorage, func()) { + mockStorage, closer := MustGetPostgresStorage(t, false) + dbStorage := mockStorage.(*storage.OCPRecommendationsDBStorage) + + return dbStorage, closer +} + +// PrepareDBDVO prepares mock DVORecommendationsDBStorage +func PrepareDBDVO(t *testing.T) (*storage.DVORecommendationsDBStorage, func()) { + mockStorage, closer := MustGetPostgresStorageDVO(t, true) + dbStorage := mockStorage.(*storage.DVORecommendationsDBStorage) + + return dbStorage, closer +} + +// PrepareDBAndInfo prepares mock OCPRecommendationsDBStorage and info table +func PrepareDBAndInfo(t *testing.T) ( + *sql.DB, + types.DBDriver, + migration.Schema, + func(), +) { + storage, closer := PrepareDB(t) + + dbConn := storage.GetConnection() + dbSchema := storage.GetDBSchema() + + if err := migration.InitInfoTable(dbConn, dbSchema); err != nil { + closer() + t.Fatal(err) + } + + return dbConn, storage.GetDBDriverType(), dbSchema, closer +} diff --git a/tests/rest/info.go b/tests/rest/info.go index d9928914e..53fb24c06 100644 --- a/tests/rest/info.go +++ b/tests/rest/info.go @@ -35,7 +35,8 @@ func checkInfoEndpoint() { "BuildCommit", "BuildTime", "BuildVersion", - "DB_version", + "OCP_DB_version", + "DVO_DB_version", "UtilsVersion", } diff --git a/tests/tests.toml b/tests/tests.toml index bc66b7271..3daa312d1 100644 --- a/tests/tests.toml +++ b/tests/tests.toml @@ -1,5 +1,5 @@ [broker] -address = "localhost:29093" +addresses = "localhost:29093" topic = "ccx.ocp.results" group = "aggregator" enabled = false @@ -20,11 +20,29 @@ org_overview_limit_hours = 3 [processing] org_allowlist_file = "org_allowlist.csv" -[storage] -db_driver = "sqlite3" -sqlite_datasource 
= "./test.db" +[ocp_recommendations_storage] +db_driver = "postgres" +pg_username = "postgres" +pg_password = "postgres" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "sslmode=disable" type = "sql" +[dvo_recommendations_storage] +db_driver = "postgres" +pg_username = "postgres" +pg_password = "postgres" +pg_host = "localhost" +pg_port = 5432 +pg_db_name = "aggregator" +pg_params = "sslmode=disable" +type = "sql" + +[storage_backend] +use = "ocp_recommendations" + [redis] database = 0 endpoint = "localhost:6379" @@ -39,4 +57,5 @@ debug = false log_level = "" logging_to_cloud_watch_enabled = false -[cloudwatch] +[metrics] +namespace = "aggregator" diff --git a/types/errors.go b/types/errors.go index e97584f74..d465f0e1a 100644 --- a/types/errors.go +++ b/types/errors.go @@ -26,7 +26,6 @@ import ( "github.com/RedHatInsights/insights-operator-utils/types" "github.com/lib/pq" - "github.com/mattn/go-sqlite3" "github.com/rs/zerolog/log" ) @@ -66,7 +65,6 @@ func (err *TableAlreadyExistsError) Error() string { } // ForeignKeyError something violates foreign key error -// tableName and foreignKeyName can be empty for DBs not supporting it (SQLite) type ForeignKeyError struct { TableName string ForeignKeyName string @@ -102,7 +100,6 @@ func ConvertDBError(err error, itemID interface{}) error { } err = convertPostgresError(err) - err = convertSQLiteError(err) return err } @@ -122,9 +119,6 @@ func regexGetNthMatchOrLogError(regexStr string, nMatch uint, str string) string return match } -func regexGetFirstMatch(regexStr, str string) (string, error) { - return regexGetNthMatch(regexStr, 1, str) -} func regexGetNthMatch(regexStr string, nMatch uint, str string) (string, error) { regex := regexp.MustCompile(regexStr) @@ -167,30 +161,3 @@ func convertPostgresError(err error) error { return err } - -func convertSQLiteError(err error) error { - sqlite3Error, ok := err.(sqlite3.Error) - if !ok { - return err - } - - errString := sqlite3Error.Error() - 
- if errString == "FOREIGN KEY constraint failed" { - return &ForeignKeyError{} - } - - if match, err := regexGetFirstMatch(`no such table: (.+)`, errString); err == nil { - return &TableNotFoundError{ - tableName: match, - } - } - - if match, err := regexGetFirstMatch(`table (.+) already exists`, errString); err == nil { - return &TableAlreadyExistsError{ - tableName: match, - } - } - - return err -} diff --git a/types/storage_backends.go b/types/storage_backends.go new file mode 100644 index 000000000..e327f0e94 --- /dev/null +++ b/types/storage_backends.go @@ -0,0 +1,23 @@ +// Copyright 2023 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +const ( + // OCPRecommendationsStorage represents OCP recommendations database schema + OCPRecommendationsStorage = "ocp_recommendations" + + // DVORecommendationsStorage represents DVO recommendations database schema + DVORecommendationsStorage = "dvo_recommendations" +) diff --git a/types/types.go b/types/types.go index 8f62632c8..b40327798 100644 --- a/types/types.go +++ b/types/types.go @@ -97,8 +97,6 @@ type ( ) const ( - // DBDriverSQLite3 shows that db driver is sqlite - DBDriverSQLite3 = types.DBDriverSQLite3 // DBDriverPostgres shows that db driver is postgres DBDriverPostgres = types.DBDriverPostgres // DBDriverGeneral general sql(used for mock now) @@ -125,6 +123,87 @@ type InfoItem struct { Details map[string]string `json:"details"` } +/* This is how a DVO workload should look like: +{ + "system": { + "metadata": {}, + "hostname": null + }, + "fingerprints": [], + "version": 1, + "analysis_metadata": {}, + "workload_recommendations": [ + { + "response_id": "an_issue|DVO_AN_ISSUE", + "component": "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + "key": "DVO_AN_ISSUE", + "details": {}, + "tags": [], + "links": { + "jira": [ + "https://issues.redhat.com/browse/AN_ISSUE" + ], + "product_documentation": [] + }, + "workloads": [ + { + "namespace": "namespace-name-A", + "namespace_uid": "NAMESPACE-UID-A", + "kind": "DaemonSet", + "name": "test-name-0099", + "uid": "UID-0099" + } + ] + } + ] +} +*/ + +// DVOMetrics contains all the workload recommendations for a single cluster +type DVOMetrics struct { + WorkloadRecommendations []WorkloadRecommendation `json:"workload_recommendations"` +} + +// WorkloadRecommendation contains all the information about the recommendation +// Details is generic interface{} because it contains template data used to fill rule content, i.e. 
we don't/should't know the structure +type WorkloadRecommendation struct { + ResponseID string `json:"response_id"` + Component string `json:"component"` + Key string `json:"key"` + Details map[string]interface{} `json:"details"` + Tags []string `json:"tags"` + Links DVOLinks `json:"links"` + Workloads []DVOWorkload `json:"workloads"` +} + +// DVOWorkload contains the main information of the workload recommendation +type DVOWorkload struct { + Namespace string `json:"namespace"` + NamespaceUID string `json:"namespace_uid"` + Kind string `json:"kind"` + Name string `json:"name"` + UID string `json:"uid"` +} + +// DVOLinks contains some URLs with relevant information about the recommendation +type DVOLinks struct { + Jira []string `json:"jira"` + ProductDocumentation []string `json:"product_documentation"` +} + +// DVOReport represents a single row of the dvo.dvo_report table. +type DVOReport struct { + OrgID string `json:"org_id"` + NamespaceID string `json:"namespace_id"` + NamespaceName string `json:"namespace_name"` + ClusterID string `json:"cluster_id"` + Recommendations uint `json:"recommendations"` + Report string `json:"report"` + Objects uint `json:"objects"` + ReportedAt types.Timestamp `json:"reported_at"` + LastCheckedAt types.Timestamp `json:"last_checked_at"` +} + // ClusterReports is a data structure containing list of clusters, list of // errors and dictionary with results per cluster. type ClusterReports = types.ClusterReports diff --git a/unit-tests.sh b/unit-tests.sh index fe48c99e5..67e326a51 100755 --- a/unit-tests.sh +++ b/unit-tests.sh @@ -18,13 +18,13 @@ STORAGE=$1 function run_unit_tests() { if [ -z "$TEST_TO_RUN" ]; then echo "No specific tests given. Running all available tests." - run_cmd="" + run_cmd="$(go list ./... | grep -v tests | tr '\n' ' ')" else echo "Running specific test $TEST_TO_RUN" - run_cmd="-run $TEST_TO_RUN" + run_cmd="$TEST_TO_RUN" fi # shellcheck disable=SC2046 - if ! 
go test -timeout 5m $run_cmd -coverprofile coverage.out $(go list ./... | grep -v tests | tr '\n' ' ') + if ! go test -timeout 5m -coverprofile coverage.out $run_cmd then echo "unit tests failed" exit 1 @@ -43,17 +43,23 @@ function check_composer() { } +function wait_for_postgres() { + until psql "dbname=aggregator user=postgres password=postgres host=localhost sslmode=disable" -c '\q' ; do + sleep 1 + done +} if [ -z "$CI" ]; then echo "Running postgres container locally" check_composer $COMPOSER up -d > /dev/null + wait_for_postgres fi path_to_config=$(pwd)/config-devel.toml export INSIGHTS_RESULTS_AGGREGATOR_CONFIG_FILE="$path_to_config" -export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB="postgres" -export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS="admin" +export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB="aggregator" +export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS="postgres" run_unit_tests if [ -z "$CI" ]; then