diff --git a/deploy/clowdapp.yaml b/deploy/clowdapp.yaml index 997a745df..04317e048 100644 --- a/deploy/clowdapp.yaml +++ b/deploy/clowdapp.yaml @@ -345,7 +345,7 @@ objects: # the DB name should match to app-interface DB name without specifying environment # https://gitlab.cee.redhat.com/service/app-interface/-/blob/ddd85c2ad79b40047391405b2d909eb38667bc43/data/services/insights/ccx-data-pipeline/namespaces/stage-ccx-data-pipeline-stage.yml#L60 name: ccx-data-pipeline - version: 15 + version: 16 kafkaTopics: - replicas: 3 partitions: 1 @@ -421,11 +421,11 @@ parameters: name: MAX_REPLICAS value: '6' - name: IRA_CPU_LIMIT - value: 200m + value: 300m - name: IRA_LOG_STREAM value: insights-results-aggregator - name: IRA_MEMORY_LIMIT - value: 300Mi + value: 450Mi - name: IRA_CPU_REQUEST value: 100m - name: IRA_MEMORY_REQUEST diff --git a/deploy/dvo-writer.yaml b/deploy/dvo-writer.yaml index fae8f1f53..91053e3e7 100644 --- a/deploy/dvo-writer.yaml +++ b/deploy/dvo-writer.yaml @@ -31,7 +31,7 @@ objects: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: dvo-writer + name: dvo-writer-instance targetCPUUtilizationPercentage: 80 - apiVersion: cloud.redhat.com/v1alpha1 @@ -44,7 +44,7 @@ objects: iqePlugin: ccx deployments: - name: instance - minReplicas: ${{DVO_WRITER_REPLICAS}} + minReplicas: ${{MIN_REPLICAS}} webServices: public: enabled: false @@ -328,9 +328,6 @@ parameters: value: / - name: LOG_STREAM value: dvo-writer -- name: DVO_WRITER_REPLICAS - description: The number of replicas to use for the dvo-writer deployment - value: "1" - name: DVO_WRITER_CPU_LIMIT value: 200m - name: DVO_WRITER_MEMORY_LIMIT diff --git a/docs/testing.md b/docs/testing.md index b2f765f1e..e86456e15 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -57,7 +57,7 @@ By default all logs from the application aren't shown, if you want to see them, To make a coverage report you need to start `./make-coverage.sh` tool with one of these arguments: -1. `unit-posgres` unit tests with postgres database(don't forget to start `docker-compose up` with the DB) +1. `unit-postgres` unit tests with postgres database(don't forget to start `docker-compose up` with the DB) 1. `rest` REST API tests from `test.sh` file 1. `integration` Any external tests, for example from iqe-ccx-plugin.
Only this option requires you to run tests manually and stop the script by `Ctrl+C` when they are done diff --git a/go.mod b/go.mod index e4fe6d6df..dafb7ef3b 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,11 @@ module github.com/RedHatInsights/insights-results-aggregator go 1.20 require ( - github.com/BurntSushi/toml v1.3.2 + github.com/BurntSushi/toml v1.4.0 github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/RedHatInsights/insights-operator-utils v1.25.4 github.com/RedHatInsights/insights-results-aggregator-data v1.3.9 - github.com/RedHatInsights/insights-results-types v1.3.23 + github.com/RedHatInsights/insights-results-types v1.23.4 github.com/Shopify/sarama v1.27.1 github.com/deckarep/golang-set v1.8.0 github.com/gchaincl/sqlhooks v1.3.0 @@ -15,15 +15,15 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/lib/pq v1.10.9 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/client_model v0.6.0 - github.com/redhatinsights/app-common-go v1.6.7 - github.com/rs/zerolog v1.32.0 - github.com/spf13/viper v1.18.2 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 + github.com/redhatinsights/app-common-go v1.6.8 + github.com/rs/zerolog v1.33.0 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/verdverm/frisby v0.0.0-20170604211311-b16556248a9a github.com/xdg/scram v1.0.5 - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 ) require ( @@ -51,13 +51,13 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.17.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mozillazg/request v0.8.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common v0.48.0 // indirect @@ -75,10 +75,10 @@ require ( github.com/xdg/stringprep v1.0.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.18.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/h2non/gock.v1 v1.1.2 // indirect diff --git a/go.sum b/go.sum index 2866ad079..c8bee933b 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= 
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= @@ -59,8 +59,8 @@ github.com/RedHatInsights/insights-results-aggregator-data v1.3.9/go.mod h1:sL0a github.com/RedHatInsights/insights-results-types v1.2.0/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= github.com/RedHatInsights/insights-results-types v1.3.20/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= github.com/RedHatInsights/insights-results-types v1.3.22/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= -github.com/RedHatInsights/insights-results-types v1.3.23 h1:F0QlBqZup7KKEzxe84/Wmp9kunuoHbQf9Ir3kLpUBAk= -github.com/RedHatInsights/insights-results-types v1.3.23/go.mod h1:6VVdMTGU/BAS2cW0KrHAUiDyocpyKqpPpEyp6AJ1tk8= +github.com/RedHatInsights/insights-results-types v1.23.4 h1:BWFxaaDRaNozhXmf1W25WnGI4qsulVACLD5PoUdWUE8= +github.com/RedHatInsights/insights-results-types v1.23.4/go.mod h1:Cz4DzWtf860oCPtdjIRa26ZbDP++rMhCSPZvgXEuSHQ= github.com/RedHatInsights/kafka-zerolog v0.0.0-20210304172207-928f026dc7ec/go.mod h1:HJul5oCsCRNiRlh/ayJDGdW3PzGlid/5aaQwJBn7was= github.com/RedHatInsights/kafka-zerolog v1.0.0 h1:4zPrLcwnfFl07qv9/ximlm1E/rWs93TkYnHrgNiU73A= github.com/RedHatInsights/kafka-zerolog v1.0.0/go.mod h1:HJul5oCsCRNiRlh/ayJDGdW3PzGlid/5aaQwJBn7was= @@ -386,8 +386,8 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -504,8 +504,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= 
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -532,16 +532,16 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -568,8 +568,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/redhatinsights/app-common-go v1.6.7 h1:cXWW0F6ZW53RLRr54gn7Azo9CLTysYOmFDR0D0Qd0Fs= -github.com/redhatinsights/app-common-go v1.6.7/go.mod h1:6gzRyg8ZyejwMCksukeAhh2ZXOB3uHSmBsbP06fG2PQ= +github.com/redhatinsights/app-common-go v1.6.8 h1:hyExMp6WHprlGkHKElQvSFF2ZPX8XTW6X+54PLLyUv0= +github.com/redhatinsights/app-common-go v1.6.8/go.mod h1:KW0BK+bnhp3kXU8BFwebQXqCqjdkcRewZsDlXCSNMyo= github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk= github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= 
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -580,8 +580,8 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= -github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= -github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -628,8 +628,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -638,6 +638,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -725,8 +726,8 @@ golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= 
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -780,8 +781,8 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -793,8 +794,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -837,8 +838,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/make-coverage.sh b/make-coverage.sh index 
04149bd2e..42ab70c8d 100755 --- a/make-coverage.sh +++ b/make-coverage.sh @@ -21,9 +21,6 @@ rm coverage.out 2>/dev/null case $1 in "unit-postgres") - export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB="postgres" - export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS="admin" - echo "Running unit tests with postgres..." go test -timeout $TIMEOUT -coverprofile=coverage.out ./... 1>&2 ;; diff --git a/migration/dvomigrations/actual_migrations_test.go b/migration/dvomigrations/actual_migrations_test.go index 0f792ea3e..c074fe42f 100644 --- a/migration/dvomigrations/actual_migrations_test.go +++ b/migration/dvomigrations/actual_migrations_test.go @@ -15,13 +15,34 @@ package dvomigrations_test import ( + "database/sql/driver" + "encoding/json" + "errors" "testing" + "github.com/RedHatInsights/insights-results-aggregator-data/testdata" + "github.com/stretchr/testify/assert" + "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/migration/dvomigrations" "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" ) +type ruleHitsCount map[string]int + +func (in ruleHitsCount) Value() (driver.Value, error) { + return json.Marshal(in) +} + +func (in *ruleHitsCount) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("not byte array") + } + + return json.Unmarshal(b, &in) +} + func TestAllMigrations(t *testing.T) { db, closer := helpers.PrepareDBDVO(t) defer closer() @@ -39,3 +60,69 @@ func TestAllMigrations(t *testing.T) { err = migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, migration.Version(0), dvomigrations.UsableDVOMigrations) helpers.FailOnError(t, err) } + +func Test0004RuleHitsCount(t *testing.T) { + db, closer := helpers.PrepareDBDVO(t) + defer closer() + + dbConn := db.GetConnection() + dbSchema := db.GetDBSchema() + + err := migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, migration.Version(3), dvomigrations.UsableDVOMigrations) + helpers.FailOnError(t, err) + + // insert before mig 0004 to test that default values are parsable + _, err = dbConn.Exec(` + INSERT INTO dvo.dvo_report (org_id, cluster_id, namespace_id, recommendations, objects) VALUES ($1, $2, $3, $4, $5); + `, + testdata.OrgID, + testdata.ClusterName, + "namespace", + 1, + 2, + ) + helpers.FailOnError(t, err) + + err = migration.SetDBVersion(dbConn, db.GetDBDriverType(), dbSchema, migration.Version(4), dvomigrations.UsableDVOMigrations) + helpers.FailOnError(t, err) + + var ruleHits ruleHitsCount + err = dbConn.QueryRow(` + SELECT rule_hits_count FROM dvo.dvo_report WHERE cluster_id = $1;`, + testdata.ClusterName, + ).Scan( + &ruleHits, + ) + + helpers.FailOnError(t, err) + // must be valid parsable json for existing rows + assert.Equal(t, "{}", helpers.ToJSONString(ruleHits)) + + cID := testdata.GetRandomClusterID() + + ruleHitsInput := ruleHitsCount{ + string(testdata.Rule1CompositeID): 1, + } + // insert a struct directly, implemented Value() method will take care of marshalling, validation, .. 
+ _, err = dbConn.Exec(` + INSERT INTO dvo.dvo_report (org_id, cluster_id, namespace_id, recommendations, objects, rule_hits_count) VALUES ($1, $2, $3, $4, $5, $6); + `, + testdata.OrgID, + cID, + "namespace", + 1, + 2, + ruleHitsInput, + ) + helpers.FailOnError(t, err) + + err = dbConn.QueryRow(` + SELECT rule_hits_count FROM dvo.dvo_report WHERE cluster_id = $1;`, + cID, + ).Scan( + &ruleHits, + ) + + helpers.FailOnError(t, err) + assert.Equal(t, ruleHitsInput, ruleHits) +} diff --git a/migration/dvomigrations/dvo_migrations.go b/migration/dvomigrations/dvo_migrations.go index 11bec316f..bc08cf8e0 100644 --- a/migration/dvomigrations/dvo_migrations.go +++ b/migration/dvomigrations/dvo_migrations.go @@ -7,4 +7,5 @@ var UsableDVOMigrations = []migration.Migration{ mig0001CreateDVOReport, mig0002CreateDVOReportIndexes, mig0003CCXDEV12602DeleteBuggyRecords, + mig0004AddRuleHitsCount, } diff --git a/migration/dvomigrations/mig_0004_rule_hits_count_column.go b/migration/dvomigrations/mig_0004_rule_hits_count_column.go new file mode 100644 index 000000000..4de54f6fc --- /dev/null +++ b/migration/dvomigrations/mig_0004_rule_hits_count_column.go @@ -0,0 +1,45 @@ +/* +Copyright © 2020 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dvomigrations + +import ( + "database/sql" + + "github.com/RedHatInsights/insights-results-aggregator/migration" + "github.com/RedHatInsights/insights-results-aggregator/types" +) + +var mig0004AddRuleHitsCount = migration.Migration{ + StepUp: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(` + ALTER TABLE dvo.dvo_report ADD COLUMN rule_hits_count JSONB DEFAULT '{}'; + `) + if err != nil { + return err + } + + _, err = tx.Exec(` + COMMENT ON COLUMN dvo.dvo_report.rule_hits_count IS 'JSON containing rule IDs and the number of hits for each rule'; + `) + + return err + }, + StepDown: func(tx *sql.Tx, _ types.DBDriver) error { + _, err := tx.Exec(`ALTER TABLE dvo.dvo_report DROP COLUMN IF EXISTS rule_hits_count;`) + return err + }, +} diff --git a/pr_check.sh b/pr_check.sh index 8f6b2a803..5f65b2fd5 100755 --- a/pr_check.sh +++ b/pr_check.sh @@ -28,7 +28,7 @@ REF_ENV="insights-production" # git version of clowdapp.yaml(or any other) file from the pull request. 
COMPONENT_NAME="ccx-insights-results ccx-redis dvo-writer" # name of app-sre "resourceTemplate" in deploy.yaml for this component IMAGE="quay.io/cloudservices/insights-results-aggregator" -COMPONENTS="ccx-data-pipeline ccx-insights-results ccx-redis dvo-writer dvo-extractor insights-content-service insights-results-smart-proxy ccx-mock-ams" # space-separated list of components to load +COMPONENTS="ccx-data-pipeline ccx-insights-results ccx-redis dvo-writer dvo-extractor insights-content-service ccx-smart-proxy ccx-mock-ams" # space-separated list of components to load COMPONENTS_W_RESOURCES="" # component to keep CACHE_FROM_LATEST_IMAGE="true" DEPLOY_FRONTENDS="false" diff --git a/server/dvo_handlers.go b/server/dvo_handlers.go index fce05a130..89836b33a 100644 --- a/server/dvo_handlers.go +++ b/server/dvo_handlers.go @@ -34,8 +34,6 @@ import ( const ( namespaceIDParam = "namespace" - // RecommendationSuffix is used to strip a suffix from rule ID - RecommendationSuffix = ".recommendation" ) // Cluster structure contains cluster UUID and cluster name @@ -62,10 +60,10 @@ type Metadata struct { // WorkloadsForNamespace structure represents a single entry of the namespace list with some aggregations type WorkloadsForNamespace struct { - Cluster Cluster `json:"cluster"` - Namespace Namespace `json:"namespace"` - Metadata Metadata `json:"metadata"` - RecommendationsHitCount map[string]int `json:"recommendations_hit_count"` + Cluster Cluster `json:"cluster"` + Namespace Namespace `json:"namespace"` + Metadata Metadata `json:"metadata"` + RecommendationsHitCount types.RuleHitsCount `json:"recommendations_hit_count"` } // WorkloadsForCluster structure represents workload for one selected cluster @@ -151,8 +149,13 @@ func (server *HTTPServer) getWorkloads(writer http.ResponseWriter, request *http return } + copyStart := time.Now() + log.Debug().Msg("processing database workloads into response") processedWorkloads := server.processDVOWorkloads(workloads) + log.Debug().Uint32(orgIDStr, uint32(orgID)).Msgf( + "processDVOWorkloads took %s", time.Since(copyStart), + ) log.Debug().Uint32(orgIDStr, uint32(orgID)).Msgf( "getWorkloads took %s", time.Since(tStart), ) @@ -165,7 +168,12 @@ func (server *HTTPServer) getWorkloads(writer http.ResponseWriter, request *http func (server *HTTPServer) processDVOWorkloads(workloads []types.DVOReport) ( processedWorkloads []WorkloadsForNamespace, ) { + log.Debug().Int("workloadsLen", len(workloads)).Msg("Length of the workloads to process") for _, workload := range workloads { + log.Debug().Int("hitCount", len(workload.RuleHitsCount)). + Str("ClusterID", workload.ClusterID).Str("Namespace", workload.NamespaceID). 
+ Msg("Length of the workloads to process") + processedWorkloads = append(processedWorkloads, WorkloadsForNamespace{ Cluster: Cluster{ UUID: workload.ClusterID, @@ -180,7 +188,7 @@ func (server *HTTPServer) processDVOWorkloads(workloads []types.DVOReport) ( ReportedAt: string(workload.ReportedAt), LastCheckedAt: string(workload.LastCheckedAt), }, - // TODO: fill RecommendationsHitCount map efficiently instead of processing the report again every time + RecommendationsHitCount: workload.RuleHitsCount, }) } @@ -302,7 +310,7 @@ func (server *HTTPServer) ProcessSingleDVONamespace(workload types.DVOReport) ( // recommendation.ResponseID doesn't contain the full rule ID, so smart-proxy was unable to retrieve content, we need to build it compositeRuleID, err := generators.GenerateCompositeRuleID( // for some unknown reason, there's a `.recommendation` suffix for each rule hit instead of the usual .report - types.RuleFQDN(strings.TrimSuffix(recommendation.Component, RecommendationSuffix)), + types.RuleFQDN(strings.TrimSuffix(recommendation.Component, types.WorkloadRecommendationSuffix)), types.ErrorKey(recommendation.Key), ) if err != nil { diff --git a/server/dvo_handlers_test.go b/server/dvo_handlers_test.go index c3390d8c2..d3a4da5a2 100644 --- a/server/dvo_handlers_test.go +++ b/server/dvo_handlers_test.go @@ -18,12 +18,17 @@ package server_test import ( "encoding/json" + "fmt" + "net/http" "testing" "time" + "github.com/RedHatInsights/insights-operator-utils/tests/helpers" + "github.com/RedHatInsights/insights-results-aggregator-data/testdata" "github.com/RedHatInsights/insights-results-aggregator/server" - "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" + ira_data "github.com/RedHatInsights/insights-results-aggregator/tests/data" + ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator/types" "github.com/stretchr/testify/assert" ) @@ -69,20 +74,44 @@ const ( // fixedDvoReportFromDB is what the string inside the report column should look like (after we fix the encoding issues) fixedDvoReportFromDB = 
`{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"unset_requirements|DVO_UNSET_REQUIREMENTS","component":"ccx_rules_ocp.external.dvo.unset_requirements.recommendation","key":"DVO_UNSET_REQUIREMENTS","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"193a2099-1234-5678-916a-d570c9aac158"},{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"Pod","name":"test-name-0001","uid":"193a2099-0000-1111-916a-d570c9aac158"}]},{"response_id":"excluded_pod|EXCLUDED_POD","component":"ccx_rules_ocp.external.dvo.excluded_pod.recommendation","key":"EXCLUDED_POD","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","uid":"12345678-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]}]}` objectBUID = `12345678-1234-5678-916a-d570c9aac158` - namespaceAID = "NAMESPACE-UID-A" - namespaceBID = "NAMESPACE-UID-B" ) +var ( + now = time.Now().UTC() +) + +func workloadInResponseChecker(t testing.TB, expected, got []byte) { + type Response struct { + Status string `json:"status"` + Workloads []server.WorkloadsForNamespace + } + + var expectedResp Response + var gotResp Response + + if err := json.Unmarshal(expected, &expectedResp); err != nil { + err = fmt.Errorf(`"expected" is not JSON. value = "%v", err = "%v"`, expected, err) + helpers.FailOnError(t, err) + } + + if err := json.Unmarshal(got, &gotResp); err != nil { + err = fmt.Errorf(`"got" is not JSON.
value = "%v", err = "%v"`, string(got), err) + helpers.FailOnError(t, err) + } + + assert.ElementsMatch(t, expectedResp.Workloads, gotResp.Workloads) +} + // TestProcessSingleDVONamespace_MustProcessEscapedString tests the behavior of ProcessSingleDVONamespace with the current // escaped JSON string, the whole string is also wrapped in quotation marks func TestProcessSingleDVONamespace_MustProcessEscapedString(t *testing.T) { - testServer := server.New(helpers.DefaultServerConfig, nil, nil) + testServer := server.New(ira_helpers.DefaultServerConfig, nil, nil) now := types.Timestamp(time.Now().UTC().Format(time.RFC3339)) dvoReport := types.DVOReport{ OrgID: "1", - NamespaceID: namespaceBID, + NamespaceID: ira_data.NamespaceBUID, NamespaceName: "namespace-name-B", ClusterID: string(testdata.ClusterName), Recommendations: 1, @@ -97,7 +126,7 @@ func TestProcessSingleDVONamespace_MustProcessEscapedString(t *testing.T) { assert.Equal(t, 1, len(processedWorkload.Recommendations)) assert.Equal(t, 1, len(processedWorkload.Recommendations[0].Objects)) assert.Equal(t, objectBUID, processedWorkload.Recommendations[0].Objects[0].UID) - assert.Equal(t, namespaceBID, processedWorkload.Namespace.UUID) + assert.Equal(t, ira_data.NamespaceBUID, processedWorkload.Namespace.UUID) assert.Equal(t, 1, processedWorkload.Metadata.Objects) assert.Equal(t, 1, processedWorkload.Metadata.Recommendations) @@ -110,13 +139,13 @@ func TestProcessSingleDVONamespace_MustProcessEscapedString(t *testing.T) { // correct string (no double escapes, leading, trailing quotes). This test demonstrates that we can fix the encoding // without affecting the API response at all, as the function simply doesn't strip anything from the strings. func TestProcessSingleDVONamespace_MustProcessCorrectString(t *testing.T) { - testServer := server.New(helpers.DefaultServerConfig, nil, nil) + testServer := server.New(ira_helpers.DefaultServerConfig, nil, nil) now := types.Timestamp(time.Now().UTC().Format(time.RFC3339)) dvoReport := types.DVOReport{ OrgID: "1", - NamespaceID: namespaceBID, + NamespaceID: ira_data.NamespaceBUID, NamespaceName: "namespace-name-B", ClusterID: "193a2099-1234-5678-916a-d570c9aac158", Recommendations: 1, @@ -131,7 +160,7 @@ func TestProcessSingleDVONamespace_MustProcessCorrectString(t *testing.T) { assert.Equal(t, 1, len(processedWorkload.Recommendations)) assert.Equal(t, 1, len(processedWorkload.Recommendations[0].Objects)) assert.Equal(t, objectBUID, processedWorkload.Recommendations[0].Objects[0].UID) - assert.Equal(t, namespaceBID, processedWorkload.Namespace.UUID) + assert.Equal(t, ira_data.NamespaceBUID, processedWorkload.Namespace.UUID) assert.Equal(t, 1, processedWorkload.Metadata.Objects) assert.Equal(t, 1, processedWorkload.Metadata.Recommendations) @@ -147,13 +176,13 @@ func TestProcessSingleDVONamespace_MustProcessCorrectString(t *testing.T) { // the requested one. And since we can end up with 0 objects for that rule_ID + namespace after the filtering, // we mustn't show this recommendation in the API, as it has no rule hits in reality. 
func TestProcessSingleDVONamespace_MustFilterZeroObjects_CCXDEV_12589_Reproducer(t *testing.T) { - testServer := server.New(helpers.DefaultServerConfig, nil, nil) + testServer := server.New(ira_helpers.DefaultServerConfig, nil, nil) now := types.Timestamp(time.Now().UTC().Format(time.RFC3339)) dvoReport := types.DVOReport{ OrgID: "1", - NamespaceID: namespaceAID, + NamespaceID: ira_data.NamespaceAUID, NamespaceName: "namespace-name-A", ClusterID: string(testdata.ClusterName), Recommendations: 1, @@ -178,7 +207,7 @@ func TestProcessSingleDVONamespace_MustFilterZeroObjects_CCXDEV_12589_Reproducer }, } assert.ElementsMatch(t, expectedObjects, processedWorkload.Recommendations[0].Objects) - assert.Equal(t, namespaceAID, processedWorkload.Namespace.UUID) + assert.Equal(t, ira_data.NamespaceAUID, processedWorkload.Namespace.UUID) // check correct metadata as well assert.Equal(t, 2, processedWorkload.Metadata.Objects) @@ -188,3 +217,127 @@ func TestProcessSingleDVONamespace_MustFilterZeroObjects_CCXDEV_12589_Reproducer assert.NoError(t, err) assert.GreaterOrEqual(t, len(string(samples)), 1) } + +func TestGetWorkloadsOK(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(ira_data.ValidReport), + ira_data.ValidDVORecommendation, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + workload := server.WorkloadsForNamespace{ + Cluster: server.Cluster{ + UUID: string(testdata.ClusterName), + }, + Namespace: server.Namespace{ + UUID: ira_data.NamespaceAUID, + Name: "namespace-name-A", + }, + Metadata: server.Metadata{ + Recommendations: 1, + Objects: 1, + ReportedAt: now.UTC().Format(time.RFC3339), + LastCheckedAt: now.UTC().Format(time.RFC3339), + }, + RecommendationsHitCount: types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + }, + } + + ira_helpers.AssertAPIRequestDVO(t, mockStorage, nil, &helpers.APIRequest{ + Method: http.MethodGet, + Endpoint: server.DVOWorkloadRecommendations, + EndpointArgs: []interface{}{testdata.OrgID}, + }, &helpers.APIResponse{ + StatusCode: http.StatusOK, + Body: `{"status":"ok","workloads":` + helpers.ToJSONString([]server.WorkloadsForNamespace{workload}) + `}`, + }) +} + +func TestGetWorkloads_NoData(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + ira_helpers.AssertAPIRequestDVO(t, mockStorage, nil, &helpers.APIRequest{ + Method: http.MethodGet, + Endpoint: server.DVOWorkloadRecommendations, + EndpointArgs: []interface{}{testdata.OrgID}, + }, &helpers.APIResponse{ + StatusCode: http.StatusOK, + }) +} + +func TestGetWorkloadsOK_TwoNamespaces(t *testing.T) { + mockStorage, closer := ira_helpers.MustGetPostgresStorageDVO(t, true) + defer closer() + + err := mockStorage.WriteReportForCluster( + testdata.OrgID, + testdata.ClusterName, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, + now, + now, + now, + testdata.RequestID1, + ) + helpers.FailOnError(t, err) + + workloads := []server.WorkloadsForNamespace{ + { + Cluster: server.Cluster{ + UUID: string(testdata.ClusterName), + }, + Namespace: server.Namespace{ + UUID: ira_data.NamespaceAUID, + Name: "namespace-name-A", + }, + Metadata: server.Metadata{ + Recommendations: 1, + Objects: 1, + ReportedAt: now.UTC().Format(time.RFC3339), + LastCheckedAt: now.UTC().Format(time.RFC3339), + }, + RecommendationsHitCount: 
types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + }, + }, + { + Cluster: server.Cluster{ + UUID: string(testdata.ClusterName), + }, + Namespace: server.Namespace{ + UUID: ira_data.NamespaceBUID, + Name: "namespace-name-B", + }, + Metadata: server.Metadata{ + Recommendations: 1, + Objects: 1, + ReportedAt: now.UTC().Format(time.RFC3339), + LastCheckedAt: now.UTC().Format(time.RFC3339), + }, + RecommendationsHitCount: types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + }, + }, + } + + ira_helpers.AssertAPIRequestDVO(t, mockStorage, nil, &helpers.APIRequest{ + Method: http.MethodGet, + Endpoint: server.DVOWorkloadRecommendations, + EndpointArgs: []interface{}{testdata.OrgID}, + }, &helpers.APIResponse{ + StatusCode: http.StatusOK, + Body: `{"status":"ok","workloads":` + helpers.ToJSONString(workloads) + `}`, + BodyChecker: workloadInResponseChecker, + }) +} diff --git a/storage/dvo_recommendations_storage.go b/storage/dvo_recommendations_storage.go index 7f36bc200..a36b3fac9 100644 --- a/storage/dvo_recommendations_storage.go +++ b/storage/dvo_recommendations_storage.go @@ -19,10 +19,12 @@ import ( "database/sql" "encoding/json" "fmt" + "strings" "time" "github.com/rs/zerolog/log" + "github.com/RedHatInsights/insights-operator-utils/generators" "github.com/RedHatInsights/insights-results-aggregator/metrics" "github.com/RedHatInsights/insights-results-aggregator/migration" "github.com/RedHatInsights/insights-results-aggregator/migration/dvomigrations" @@ -326,13 +328,13 @@ func (storage DVORecommendationsDBStorage) updateReport( return nil } - namespaceMap, objectsMap, namespaceRecommendationCount := mapWorkloadRecommendations(&recommendations) + namespaceMap, objectsMap, namespaceRecommendationCount, ruleHitsCounts := mapWorkloadRecommendations(&recommendations) // Get the INSERT statement for writing a workload into the database. workloadInsertStatement := storage.getReportInsertQuery() // Get values to be stored in dvo.dvo_report table - values := make([]interface{}, 9) + values := make([]interface{}, 10) for namespaceUID, namespaceName := range namespaceMap { values[0] = orgID // org_id values[1] = clusterName // cluster_id @@ -357,6 +359,9 @@ func (storage DVORecommendationsDBStorage) updateReport( } values[8] = lastCheckedTime // last_checked_at + + values[9] = ruleHitsCounts[namespaceUID] // rule_hits_count + _, err = tx.Exec(workloadInsertStatement, values...) 
if err != nil { log.Err(err).Msgf("Unable to insert the cluster workloads (org: %v, cluster: %v)", @@ -369,6 +374,30 @@ func (storage DVORecommendationsDBStorage) updateReport( return nil } +// updateRuleHitsCountsForNamespace updates the rule hits for given namespace based on the given recommendation +func updateRuleHitsCountsForNamespace(ruleHitsCounts map[string]types.RuleHitsCount, namespaceUID string, recommendation types.WorkloadRecommendation) { + if _, ok := ruleHitsCounts[namespaceUID]; !ok { + ruleHitsCounts[namespaceUID] = make(types.RuleHitsCount) + } + + // define key in rule hits counts map as concatenation of rule component and key + compositeRuleID, err := generators.GenerateCompositeRuleID( + // for some unknown reason, there's a `.recommendation` suffix for each rule hit instead of the usual .report + types.RuleFQDN(strings.TrimSuffix(recommendation.Component, types.WorkloadRecommendationSuffix)), + types.ErrorKey(recommendation.Key), + ) + if err != nil { + log.Error().Err(err).Msg("error generating composite rule ID for rule") + return + } + + compositeRuleIDString := string(compositeRuleID) + if _, ok := ruleHitsCounts[namespaceUID][compositeRuleIDString]; !ok { + ruleHitsCounts[namespaceUID][compositeRuleIDString] = 0 + } + ruleHitsCounts[namespaceUID][compositeRuleIDString]++ +} + // mapWorkloadRecommendations filters out the data which is grouped by recommendations and aggregates // them by namespace. // Essentially we need to "invert" data from: @@ -377,7 +406,7 @@ func (storage DVORecommendationsDBStorage) updateReport( // to: // - list of namespaces: list of affected workloads and data aggregations for this particular namespace func mapWorkloadRecommendations(recommendations *[]types.WorkloadRecommendation) ( - map[string]string, map[string]int, map[string]int, + map[string]string, map[string]int, map[string]int, map[string]types.RuleHitsCount, ) { // map the namespace ID to the namespace name namespaceMap := make(map[string]string) @@ -385,6 +414,8 @@ func mapWorkloadRecommendations(recommendations *[]types.WorkloadRecommendation) namespaceRecommendationCount := make(map[string]int) // map the number of unique workloads affected by at least 1 rule per namespace objectsPerNamespace := make(map[string]map[string]struct{}) + // map the hit counts per namespace and rule + ruleHitsCounts := make(map[string]types.RuleHitsCount) for _, recommendation := range *recommendations { // objectsMapPerRecommendation is used to calculate number of rule hits in namespace @@ -401,6 +432,8 @@ func mapWorkloadRecommendations(recommendations *[]types.WorkloadRecommendation) // per single recommendation within namespace objectsPerRecommendation[workload.NamespaceUID]++ + updateRuleHitsCountsForNamespace(ruleHitsCounts, workload.NamespaceUID, recommendation) + // per whole namespace; just workload IDs with empty structs to filter out duplicate objects if _, ok := objectsPerNamespace[workload.NamespaceUID]; !ok { objectsPerNamespace[workload.NamespaceUID] = make(map[string]struct{}) @@ -422,7 +455,7 @@ func mapWorkloadRecommendations(recommendations *[]types.WorkloadRecommendation) uniqueObjectsMap[namespace] = len(objects) } - return namespaceMap, uniqueObjectsMap, namespaceRecommendationCount + return namespaceMap, uniqueObjectsMap, namespaceRecommendationCount, ruleHitsCounts } // getRuleKeyCreatedAtMap returns a map between @@ -463,7 +496,7 @@ func (storage DVORecommendationsDBStorage) ReadWorkloadsForOrganization(orgID ty ) { tStart := time.Now() query := ` - SELECT cluster_id, 
namespace_id, namespace_name, recommendations, objects, reported_at, last_checked_at + SELECT cluster_id, namespace_id, namespace_name, recommendations, objects, reported_at, last_checked_at, rule_hits_count FROM dvo.dvo_report WHERE org_id = $1 ` @@ -493,6 +526,7 @@ func (storage DVORecommendationsDBStorage) ReadWorkloadsForOrganization(orgID ty &dvoReport.Objects, &reportedAtDB, &lastCheckedAtDB, + &dvoReport.RuleHitsCount, ) if err != nil { log.Error().Err(err).Msg("ReadWorkloadsForOrganization") diff --git a/storage/dvo_recommendations_storage_test.go b/storage/dvo_recommendations_storage_test.go index 3b0cf0290..14c6f026f 100644 --- a/storage/dvo_recommendations_storage_test.go +++ b/storage/dvo_recommendations_storage_test.go @@ -32,6 +32,7 @@ import ( "github.com/RedHatInsights/insights-operator-utils/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator-data/testdata" "github.com/RedHatInsights/insights-results-aggregator/storage" + ira_data "github.com/RedHatInsights/insights-results-aggregator/tests/data" ira_helpers "github.com/RedHatInsights/insights-results-aggregator/tests/helpers" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -40,126 +41,6 @@ var ( now = time.Now().UTC() nowAfterOneHour = now.Add(1 * time.Hour).UTC() dummyTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - - namespaceAUID = "NAMESPACE-UID-A" - namespaceBUID = "NAMESPACE-UID-B" - - namespaceAWorkload = types.DVOWorkload{ - Namespace: "namespace-name-A", - NamespaceUID: namespaceAUID, - Kind: "DaemonSet", - Name: "test-name-0099", - UID: "UID-0099", - } - namespaceAWorkload2 = types.DVOWorkload{ - Namespace: "namespace-name-A", - NamespaceUID: namespaceAUID, - Kind: "Pod", - Name: "test-name-0001", - UID: "UID-0001", - } - namespaceBWorkload = types.DVOWorkload{ - Namespace: "namespace-name-B", - NamespaceUID: namespaceBUID, - Kind: "NotDaemonSet", - Name: "test-name-1199", - UID: "UID-1199", - } - validDVORecommendation = []types.WorkloadRecommendation{ - { - ResponseID: "an_issue|DVO_AN_ISSUE", - Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", - Key: "DVO_AN_ISSUE", - Links: types.DVOLinks{ - Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, - ProductDocumentation: []string{}, - }, - Details: map[string]interface{}{ - "check_name": "", - "check_url": "", - "samples": []interface{}{ - map[string]interface{}{ - "namespace_uid": namespaceAUID, "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158", - }, - }, - }, - Tags: []string{}, - Workloads: []types.DVOWorkload{namespaceAWorkload}, - }, - } - validReport = `{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"an_issue|DVO_AN_ISSUE","component":"ccx_rules_ocp.external.dvo.an_issue_pod.recommendation","key":"DVO_AN_ISSUE","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"UID-0099"}]}]}` - validReport2Rules2Namespaces = 
`{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"unset_requirements|DVO_UNSET_REQUIREMENTS","component":"ccx_rules_ocp.external.dvo.unset_requirements.recommendation","key":"DVO_UNSET_REQUIREMENTS","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"193a2099-1234-5678-916a-d570c9aac158"},{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]},{"response_id":"excluded_pod|EXCLUDED_POD","component":"ccx_rules_ocp.external.dvo.excluded_pod.recommendation","key":"EXCLUDED_POD","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","uid":"12345678-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]}]}` - - twoNamespacesRecommendation = []types.WorkloadRecommendation{ - { - ResponseID: "an_issue|DVO_AN_ISSUE", - Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", - Key: "DVO_AN_ISSUE", - Links: types.DVOLinks{ - Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, - ProductDocumentation: []string{}, - }, - Details: map[string]interface{}{ - "check_name": "", - "check_url": "", - "samples": []interface{}{ - map[string]interface{}{ - "namespace_uid": namespaceAUID, "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158", - }, - }, - }, - Tags: []string{}, - Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceBWorkload}, - }, - } - - recommendation1TwoNamespaces = types.WorkloadRecommendation{ - ResponseID: "an_issue|DVO_AN_ISSUE", - Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", - Key: "DVO_AN_ISSUE", - Links: types.DVOLinks{ - Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, - ProductDocumentation: []string{}, - }, - Details: map[string]interface{}{ - "check_name": "", - "check_url": "", - }, - Tags: []string{}, - Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceBWorkload}, - } - - recommendation2OneNamespace = types.WorkloadRecommendation{ - ResponseID: "unset_requirements|DVO_UNSET_REQUIREMENTS", - Component: "ccx_rules_ocp.external.dvo.unset_requirements.recommendation", - Key: "DVO_UNSET_REQUIREMENTS", - Links: types.DVOLinks{ - Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, - ProductDocumentation: []string{}, - }, - Details: map[string]interface{}{ - "check_name": "", - "check_url": "", - }, - Tags: []string{}, - Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceAWorkload2}, - } - - recommendation3OneNamespace = types.WorkloadRecommendation{ - ResponseID: "bad_requirements|BAD_REQUIREMENTS", - Component: "ccx_rules_ocp.external.dvo.bad_requirements.recommendation", - Key: "BAD_REQUIREMENTS", - Links: types.DVOLinks{ - Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, - ProductDocumentation: []string{}, - }, - Details: map[string]interface{}{ - "check_name": "", 
- "check_url": "", - }, - Tags: []string{}, - Workloads: []types.DVOWorkload{namespaceAWorkload, namespaceAWorkload2}, - } ) func init() { @@ -242,7 +123,7 @@ func TestDVOStorageWriteReportForClusterClosedStorage(t *testing.T) { testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, - validDVORecommendation, + ira_data.ValidDVORecommendation, now, dummyTime, dummyTime, @@ -260,7 +141,7 @@ func TestDVOStorageWriteReportForClusterUnsupportedDriverError(t *testing.T) { testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, - validDVORecommendation, + ira_data.ValidDVORecommendation, now, dummyTime, dummyTime, @@ -283,7 +164,7 @@ func TestDVOStorageWriteReportForClusterMoreRecentInDB(t *testing.T) { testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, - validDVORecommendation, + ira_data.ValidDVORecommendation, newerTime, dummyTime, dummyTime, @@ -296,7 +177,7 @@ func TestDVOStorageWriteReportForClusterMoreRecentInDB(t *testing.T) { testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, - validDVORecommendation, + ira_data.ValidDVORecommendation, olderTime, now, now, @@ -320,7 +201,7 @@ func TestDVOStorageWriteReportForClusterDroppedReportTable(t *testing.T) { err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, testdata.ClusterReportEmpty, - validDVORecommendation, now, now, now, + ira_data.ValidDVORecommendation, now, now, now, testdata.RequestID1, ) assert.EqualError(t, err, "no such table: dvo.dvo_report") @@ -348,7 +229,7 @@ func TestDVOStorageWriteReportForClusterFakePostgresOK(t *testing.T) { err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, `{"test": "report"}`, - validDVORecommendation, testdata.LastCheckedAt, now, now, + ira_data.ValidDVORecommendation, testdata.LastCheckedAt, now, now, testdata.RequestID1) helpers.FailOnError(t, mockStorage.Close()) helpers.FailOnError(t, err) @@ -364,8 +245,8 @@ func TestDVOStorageWriteReportForClusterCheckItIsStored(t *testing.T) { err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - validDVORecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.ValidDVORecommendation, now, dummyTime, dummyTime, @@ -377,7 +258,7 @@ func TestDVOStorageWriteReportForClusterCheckItIsStored(t *testing.T) { "SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2;", testdata.OrgID, testdata.ClusterName, ) - checkStoredReport(t, row, namespaceAWorkload, 1, now, now) + checkStoredReport(t, row, ira_data.NamespaceAWorkload, 1, now, now) } func TestDVOStorageWriteReportForClusterCheckPreviousIsDeleted(t *testing.T) { @@ -390,8 +271,8 @@ func TestDVOStorageWriteReportForClusterCheckPreviousIsDeleted(t *testing.T) { err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - twoNamespacesRecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, now, dummyTime, dummyTime, @@ -403,22 +284,22 @@ func TestDVOStorageWriteReportForClusterCheckPreviousIsDeleted(t *testing.T) { row := mockStorage.GetConnection().QueryRow(` SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, - testdata.OrgID, testdata.ClusterName, namespaceAWorkload.NamespaceUID, + testdata.OrgID, 
testdata.ClusterName, ira_data.NamespaceAWorkload.NamespaceUID, ) - checkStoredReport(t, row, namespaceAWorkload, 1, now, now) + checkStoredReport(t, row, ira_data.NamespaceAWorkload, 1, now, now) row = mockStorage.GetConnection().QueryRow(` SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, - testdata.OrgID, testdata.ClusterName, namespaceBWorkload.NamespaceUID, + testdata.OrgID, testdata.ClusterName, ira_data.NamespaceBWorkload.NamespaceUID, ) - checkStoredReport(t, row, namespaceBWorkload, 1, now, now) + checkStoredReport(t, row, ira_data.NamespaceBWorkload, 1, now, now) // Now receive a report with just one namespace for the same cluster err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - validDVORecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.ValidDVORecommendation, nowAfterOneHour, dummyTime, dummyTime, @@ -430,13 +311,13 @@ func TestDVOStorageWriteReportForClusterCheckPreviousIsDeleted(t *testing.T) { row = mockStorage.GetConnection().QueryRow(` SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, - testdata.OrgID, testdata.ClusterName, namespaceAWorkload.NamespaceUID, + testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAWorkload.NamespaceUID, ) - checkStoredReport(t, row, namespaceAWorkload, 1, nowAfterOneHour, now) + checkStoredReport(t, row, ira_data.NamespaceAWorkload, 1, nowAfterOneHour, now) row = mockStorage.GetConnection().QueryRow(` SELECT namespace_id, namespace_name, report, recommendations, objects, last_checked_at, reported_at FROM dvo.dvo_report WHERE org_id = $1 AND cluster_id = $2 AND namespace_id = $3;`, - testdata.OrgID, testdata.ClusterName, namespaceBWorkload.NamespaceUID, + testdata.OrgID, testdata.ClusterName, ira_data.NamespaceBWorkload.NamespaceUID, ) checkRowDoesntExist(t, row) } @@ -463,7 +344,7 @@ func checkStoredReport(t *testing.T, row *sql.Row, want types.DVOWorkload, wantO assert.Equal(t, want.NamespaceUID, namespaceID, "the column namespace_id is different than expected") assert.Equal(t, want.Namespace, namespaceName, "the column namespace_name is different than expected") - assert.Equal(t, validDVORecommendation, gotWorkloads.WorkloadRecommendations, "the column report is different than expected") + assert.Equal(t, ira_data.ValidDVORecommendation, gotWorkloads.WorkloadRecommendations, "the column report is different than expected") assert.Equal(t, 1, recommendations, "the column recommendations is different than expected") assert.Equal(t, wantObjects, objects, "the column objects is different than expected") assert.Equal(t, wantLastChecked.Truncate(time.Second), lastChecked.UTC().Truncate(time.Second), "the column reported_at is different than expected") @@ -493,8 +374,8 @@ func TestDVOStorageReadWorkloadsForOrganization(t *testing.T) { err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - twoNamespacesRecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, now, now, now, @@ -506,8 +387,8 @@ func TestDVOStorageReadWorkloadsForOrganization(t *testing.T) { err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - 
twoNamespacesRecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, nowAfterOneHour, nowAfterOneHour, nowAfterOneHour, @@ -519,8 +400,13 @@ func TestDVOStorageReadWorkloadsForOrganization(t *testing.T) { helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(workloads[0].ClusterID)) + assert.Equal(t, testdata.ClusterName, types.ClusterName(workloads[1].ClusterID)) assert.Equal(t, types.Timestamp(nowAfterOneHour.UTC().Format(time.RFC3339)), workloads[0].LastCheckedAt) + assert.Equal(t, types.Timestamp(nowAfterOneHour.UTC().Format(time.RFC3339)), workloads[1].LastCheckedAt) assert.Equal(t, types.Timestamp(now.UTC().Format(time.RFC3339)), workloads[0].ReportedAt) + assert.Equal(t, types.Timestamp(now.UTC().Format(time.RFC3339)), workloads[1].ReportedAt) + assert.Equal(t, types.RuleHitsCount{"ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1}, workloads[0].RuleHitsCount) + assert.Equal(t, types.RuleHitsCount{"ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1}, workloads[1].RuleHitsCount) } // TestDVOStorageReadWorkloadsForNamespace tests timestamps being kept correctly @@ -531,8 +417,8 @@ func TestDVOStorageReadWorkloadsForNamespace_Timestamps(t *testing.T) { err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - twoNamespacesRecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, now, now, now, @@ -544,8 +430,8 @@ func TestDVOStorageReadWorkloadsForNamespace_Timestamps(t *testing.T) { err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - twoNamespacesRecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, nowAfterOneHour, nowAfterOneHour, nowAfterOneHour, @@ -553,11 +439,11 @@ func TestDVOStorageReadWorkloadsForNamespace_Timestamps(t *testing.T) { ) helpers.FailOnError(t, err) - report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAUID) helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) - assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, ira_data.NamespaceAUID, report.NamespaceID) assert.Equal(t, uint(1), report.Recommendations) assert.Equal(t, uint(1), report.Objects) assert.Equal(t, types.Timestamp(nowAfterOneHour.UTC().Format(time.RFC3339)), report.LastCheckedAt) @@ -575,8 +461,8 @@ func TestDVOStorageReadWorkloadsForNamespace_TwoObjectsOneNamespace(t *testing.T err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - twoNamespacesRecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.TwoNamespacesRecommendation, now, now, now, @@ -584,23 +470,23 @@ func TestDVOStorageReadWorkloadsForNamespace_TwoObjectsOneNamespace(t *testing.T ) helpers.FailOnError(t, err) - report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAUID) helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) - assert.Equal(t, namespaceAUID, report.NamespaceID) + 
assert.Equal(t, ira_data.NamespaceAUID, report.NamespaceID) assert.Equal(t, uint(1), report.Recommendations) assert.Equal(t, uint(1), report.Objects) assert.Equal(t, nowTstmp, report.ReportedAt) assert.Equal(t, nowTstmp, report.LastCheckedAt) - newerReport2Objs := twoNamespacesRecommendation - newerReport2Objs[0].Workloads = []types.DVOWorkload{namespaceAWorkload, namespaceAWorkload2, namespaceBWorkload} + newerReport2Objs := ira_data.TwoNamespacesRecommendation + newerReport2Objs[0].Workloads = []types.DVOWorkload{ira_data.NamespaceAWorkload, ira_data.NamespaceAWorkload2, ira_data.NamespaceBWorkload} // write new archive with newer timestamp and 1 more object in the recommendation hit err = mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), + types.ClusterReport(ira_data.ValidReport), newerReport2Objs, nowAfterOneHour, nowAfterOneHour, @@ -609,11 +495,11 @@ func TestDVOStorageReadWorkloadsForNamespace_TwoObjectsOneNamespace(t *testing.T ) helpers.FailOnError(t, err) - report, err = mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + report, err = mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAUID) helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) - assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, ira_data.NamespaceAUID, report.NamespaceID) assert.Equal(t, uint(1), report.Recommendations) assert.Equal(t, uint(2), report.Objects) // <-- two objs now @@ -633,8 +519,8 @@ func TestDVOStorageWriteReport_TwoNamespacesTwoRecommendations(t *testing.T) { err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport2Rules2Namespaces), - []types.WorkloadRecommendation{recommendation1TwoNamespaces, recommendation2OneNamespace}, + types.ClusterReport(ira_data.ValidReport2Rules2Namespaces), + []types.WorkloadRecommendation{ira_data.Recommendation1TwoNamespaces, ira_data.Recommendation2OneNamespace}, now, now, now, @@ -644,22 +530,30 @@ func TestDVOStorageWriteReport_TwoNamespacesTwoRecommendations(t *testing.T) { expectedWorkloads := []types.DVOReport{ { - NamespaceID: namespaceAUID, - NamespaceName: namespaceAWorkload.Namespace, + NamespaceID: ira_data.NamespaceAUID, + NamespaceName: ira_data.NamespaceAWorkload.Namespace, ClusterID: string(testdata.ClusterName), Recommendations: uint(2), Objects: uint(2), // <-- must be 2, because one workload is hitting more recommendations, but counts as 1 ReportedAt: nowTstmp, LastCheckedAt: nowTstmp, + RuleHitsCount: types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + "ccx_rules_ocp.external.dvo.unset_requirements|DVO_UNSET_REQUIREMENTS": 2, + }, }, { - NamespaceID: namespaceBUID, - NamespaceName: namespaceBWorkload.Namespace, + NamespaceID: ira_data.NamespaceBUID, + NamespaceName: ira_data.NamespaceBWorkload.Namespace, ClusterID: string(testdata.ClusterName), Recommendations: uint(1), // <-- must contain only 1 rule, the other rule wasn't hitting this ns Objects: uint(1), ReportedAt: nowTstmp, LastCheckedAt: nowTstmp, + RuleHitsCount: types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + "ccx_rules_ocp.external.dvo.unset_requirements|DVO_UNSET_REQUIREMENTS": 2, + }, }, } @@ -669,11 +563,11 @@ func TestDVOStorageWriteReport_TwoNamespacesTwoRecommendations(t *testing.T) { assert.Equal(t, 2, len(workloads)) 
assert.ElementsMatch(t, expectedWorkloads, workloads) - report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAUID) helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) - assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, ira_data.NamespaceAUID, report.NamespaceID) assert.Equal(t, uint(2), report.Recommendations) assert.Equal(t, uint(2), report.Objects) assert.Equal(t, nowTstmp, report.ReportedAt) @@ -691,11 +585,11 @@ func TestDVOStorageWriteReport_FilterOutDuplicateObjects_CCXDEV_12608_Reproducer err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport2Rules2Namespaces), + types.ClusterReport(ira_data.ValidReport2Rules2Namespaces), []types.WorkloadRecommendation{ - recommendation1TwoNamespaces, - recommendation2OneNamespace, - recommendation3OneNamespace, + ira_data.Recommendation1TwoNamespaces, + ira_data.Recommendation2OneNamespace, + ira_data.Recommendation3OneNamespace, }, now, now, @@ -706,22 +600,32 @@ func TestDVOStorageWriteReport_FilterOutDuplicateObjects_CCXDEV_12608_Reproducer expectedWorkloads := []types.DVOReport{ { - NamespaceID: namespaceAUID, - NamespaceName: namespaceAWorkload.Namespace, + NamespaceID: ira_data.NamespaceAUID, + NamespaceName: ira_data.NamespaceAWorkload.Namespace, ClusterID: string(testdata.ClusterName), Recommendations: uint(3), Objects: uint(2), // <-- must be 2, because workloadA and workloadB are hitting more rules, but count as 1 within a namespace ReportedAt: nowTstmp, LastCheckedAt: nowTstmp, + RuleHitsCount: types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + "ccx_rules_ocp.external.dvo.unset_requirements|DVO_UNSET_REQUIREMENTS": 2, + "ccx_rules_ocp.external.dvo.bad_requirements|BAD_REQUIREMENTS": 2, + }, }, { - NamespaceID: namespaceBUID, - NamespaceName: namespaceBWorkload.Namespace, + NamespaceID: ira_data.NamespaceBUID, + NamespaceName: ira_data.NamespaceBWorkload.Namespace, ClusterID: string(testdata.ClusterName), Recommendations: uint(1), // <-- must contain only 1 rule, the other rules weren't affecting this namespace Objects: uint(1), // <-- same as ^ ReportedAt: nowTstmp, LastCheckedAt: nowTstmp, + RuleHitsCount: types.RuleHitsCount{ + "ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1, + "ccx_rules_ocp.external.dvo.unset_requirements|DVO_UNSET_REQUIREMENTS": 2, + "ccx_rules_ocp.external.dvo.bad_requirements|BAD_REQUIREMENTS": 2, + }, }, } @@ -731,11 +635,11 @@ func TestDVOStorageWriteReport_FilterOutDuplicateObjects_CCXDEV_12608_Reproducer assert.Equal(t, 2, len(workloads)) assert.ElementsMatch(t, expectedWorkloads, workloads) - report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAUID) helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) - assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, ira_data.NamespaceAUID, report.NamespaceID) assert.Equal(t, uint(3), report.Recommendations) assert.Equal(t, uint(2), report.Objects) assert.Equal(t, nowTstmp, report.ReportedAt) @@ -751,8 +655,8 @@ func TestDVOStorageReadWorkloadsForNamespace_MissingData(t 
*testing.T) { err := mockStorage.WriteReportForCluster( testdata.OrgID, testdata.ClusterName, - types.ClusterReport(validReport), - validDVORecommendation, + types.ClusterReport(ira_data.ValidReport), + ira_data.ValidDVORecommendation, now, now, now, @@ -761,20 +665,20 @@ func TestDVOStorageReadWorkloadsForNamespace_MissingData(t *testing.T) { helpers.FailOnError(t, err) t.Run("cluster and namespace exist", func(t *testing.T) { - report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceAUID) + report, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceAUID) helpers.FailOnError(t, err) assert.Equal(t, testdata.ClusterName, types.ClusterName(report.ClusterID)) - assert.Equal(t, namespaceAUID, report.NamespaceID) + assert.Equal(t, ira_data.NamespaceAUID, report.NamespaceID) }) t.Run("cluster exists and namespace doesn't", func(t *testing.T) { - _, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, namespaceBUID) - assert.Equal(t, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", testdata.OrgID, testdata.ClusterName, namespaceBUID)}, err) + _, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, testdata.ClusterName, ira_data.NamespaceBUID) + assert.Equal(t, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", testdata.OrgID, testdata.ClusterName, ira_data.NamespaceBUID)}, err) }) t.Run("namespace exists and cluster doesn't", func(t *testing.T) { nonExistingCluster := types.ClusterName("a6fe3cd2-2c6a-48b8-a58d-b05853d47f4f") - _, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, nonExistingCluster, namespaceAUID) - assert.Equal(t, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", testdata.OrgID, nonExistingCluster, namespaceAUID)}, err) + _, err := mockStorage.ReadWorkloadsForClusterAndNamespace(testdata.OrgID, nonExistingCluster, ira_data.NamespaceAUID) + assert.Equal(t, &types.ItemNotFoundError{ItemID: fmt.Sprintf("%d:%s:%s", testdata.OrgID, nonExistingCluster, ira_data.NamespaceAUID)}, err) }) } diff --git a/storage/noop_dvo_recommendations_storage_test.go b/storage/noop_dvo_recommendations_storage_test.go index d364c8d47..abe6e2b9c 100644 --- a/storage/noop_dvo_recommendations_storage_test.go +++ b/storage/noop_dvo_recommendations_storage_test.go @@ -19,6 +19,7 @@ import ( "time" "github.com/RedHatInsights/insights-results-aggregator/storage" + ira_data "github.com/RedHatInsights/insights-results-aggregator/tests/data" "github.com/RedHatInsights/insights-results-aggregator/types" ) @@ -38,5 +39,5 @@ func TestDVONoopStorageEmptyMethods(_ *testing.T) { _ = noopStorage.GetConnection() _ = noopStorage.GetDBDriverType() - _ = noopStorage.WriteReportForCluster(0, "", "", validDVORecommendation, time.Now(), time.Now(), time.Now(), "") + _ = noopStorage.WriteReportForCluster(0, "", "", ira_data.ValidDVORecommendation, time.Now(), time.Now(), time.Now(), "") } diff --git a/storage/ocp_recommendations_storage_test.go b/storage/ocp_recommendations_storage_test.go index 4866e19d1..f22debc37 100644 --- a/storage/ocp_recommendations_storage_test.go +++ b/storage/ocp_recommendations_storage_test.go @@ -421,7 +421,7 @@ func TestDBStorageWriteReportForClusterExecError(t *testing.T) { ) assert.Error(t, err) - const postgresErrMessage = "pq: invalid input syntax for type integer" + const postgresErrMessage = "pq: invalid input syntax" if !strings.HasPrefix(err.Error(), postgresErrMessage) { 
t.Fatalf("expected: \n%v\ngot:\n%v", postgresErrMessage, err.Error()) } diff --git a/storage/queries.go b/storage/queries.go index 38c8b8b46..fdc36f1dc 100644 --- a/storage/queries.go +++ b/storage/queries.go @@ -34,7 +34,7 @@ func (storage OCPRecommendationsDBStorage) getReportInfoUpsertQuery() string { func (storage DVORecommendationsDBStorage) getReportInsertQuery() string { return ` - INSERT INTO dvo.dvo_report(org_id, cluster_id, namespace_id, namespace_name, report, recommendations, objects, reported_at, last_checked_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + INSERT INTO dvo.dvo_report(org_id, cluster_id, namespace_id, namespace_name, report, recommendations, objects, reported_at, last_checked_at, rule_hits_count) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ` } diff --git a/storage/storage_rules_test.go b/storage/storage_rules_test.go index d4d0f980d..9d7e7e673 100644 --- a/storage/storage_rules_test.go +++ b/storage/storage_rules_test.go @@ -516,7 +516,7 @@ func TestDBStorageVoteOnRuleDBExecError(t *testing.T) { err = mockStorage.VoteOnRule("non int", testdata.Rule1ID, testdata.ErrorKey1, testdata.OrgID, testdata.UserID, types.UserVoteNone, "") assert.Error(t, err) - const postgresErrMessage = "pq: invalid input syntax for type integer: \"non int\"" + const postgresErrMessage = "pq: invalid input syntax" if !strings.HasPrefix(err.Error(), postgresErrMessage) { t.Fatalf("expected : \n%v\ngot:\n%v", postgresErrMessage, err.Error()) } diff --git a/tests/data/dvo_data.go b/tests/data/dvo_data.go new file mode 100644 index 000000000..d3276dcba --- /dev/null +++ b/tests/data/dvo_data.go @@ -0,0 +1,150 @@ +/* +Copyright © 2024 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package testdata
+
+import (
+	"github.com/RedHatInsights/insights-results-aggregator/types"
+)
+
+var (
+	// NamespaceAUID represents namespace A
+	NamespaceAUID = "NAMESPACE-UID-A"
+	// NamespaceBUID represents namespace B
+	NamespaceBUID = "NAMESPACE-UID-B"
+	// NamespaceAWorkload workload for namespace A
+	NamespaceAWorkload = types.DVOWorkload{
+		Namespace:    "namespace-name-A",
+		NamespaceUID: NamespaceAUID,
+		Kind:         "DaemonSet",
+		Name:         "test-name-0099",
+		UID:          "UID-0099",
+	}
+	// NamespaceAWorkload2 another workload for namespace A
+	NamespaceAWorkload2 = types.DVOWorkload{
+		Namespace:    "namespace-name-A",
+		NamespaceUID: NamespaceAUID,
+		Kind:         "Pod",
+		Name:         "test-name-0001",
+		UID:          "UID-0001",
+	}
+	// NamespaceBWorkload workload for namespace B
+	NamespaceBWorkload = types.DVOWorkload{
+		Namespace:    "namespace-name-B",
+		NamespaceUID: NamespaceBUID,
+		Kind:         "NotDaemonSet",
+		Name:         "test-name-1199",
+		UID:          "UID-1199",
+	}
+	// ValidDVORecommendation to be inserted into DB
+	ValidDVORecommendation = []types.WorkloadRecommendation{
+		{
+			ResponseID: "an_issue|DVO_AN_ISSUE",
+			Component:  "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation",
+			Key:        "DVO_AN_ISSUE",
+			Links: types.DVOLinks{
+				Jira:                 []string{"https://issues.redhat.com/browse/AN_ISSUE"},
+				ProductDocumentation: []string{},
+			},
+			Details: map[string]interface{}{
+				"check_name": "",
+				"check_url":  "",
+				"samples": []interface{}{
+					map[string]interface{}{
+						"namespace_uid": NamespaceAUID, "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158",
+					},
+				},
+			},
+			Tags:      []string{},
+			Workloads: []types.DVOWorkload{NamespaceAWorkload},
+		},
+	}
+	// ValidReport is a full report inserted into `report` column
+	ValidReport = `{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"an_issue|DVO_AN_ISSUE","component":"ccx_rules_ocp.external.dvo.an_issue_pod.recommendation","key":"DVO_AN_ISSUE","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"UID-0099"}]}]}`
+	// ValidReport2Rules2Namespaces is a full report inserted into `report` column with 2 rules and 2 namespaces
+	ValidReport2Rules2Namespaces = 
`{"system":{"metadata":{},"hostname":null},"fingerprints":[],"version":1,"analysis_metadata":{},"workload_recommendations":[{"response_id":"unset_requirements|DVO_UNSET_REQUIREMENTS","component":"ccx_rules_ocp.external.dvo.unset_requirements.recommendation","key":"DVO_UNSET_REQUIREMENTS","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","uid":"193a2099-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-A","namespace_uid":"NAMESPACE-UID-A","kind":"DaemonSet","name":"test-name-0099","uid":"193a2099-1234-5678-916a-d570c9aac158"},{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]},{"response_id":"excluded_pod|EXCLUDED_POD","component":"ccx_rules_ocp.external.dvo.excluded_pod.recommendation","key":"EXCLUDED_POD","details":{"check_name":"","check_url":"","samples":[{"namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","uid":"12345678-1234-5678-916a-d570c9aac158"}]},"tags":[],"links":{"jira":["https://issues.redhat.com/browse/AN_ISSUE"],"product_documentation":[]},"workloads":[{"namespace":"namespace-name-B","namespace_uid":"NAMESPACE-UID-B","kind":"DaemonSet","name":"test-name-1234","uid":"12345678-1234-5678-916a-d570c9aac158"}]}]}` + // TwoNamespacesRecommendation to be inserted into DB with 2 namespaces + TwoNamespacesRecommendation = []types.WorkloadRecommendation{ + { + ResponseID: "an_issue|DVO_AN_ISSUE", + Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + Key: "DVO_AN_ISSUE", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + "samples": []interface{}{ + map[string]interface{}{ + "namespace_uid": NamespaceAUID, "kind": "DaemonSet", "uid": "193a2099-1234-5678-916a-d570c9aac158", + }, + }, + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{NamespaceAWorkload, NamespaceBWorkload}, + }, + } + // Recommendation1TwoNamespaces with 2 namespaces 1 rule + Recommendation1TwoNamespaces = types.WorkloadRecommendation{ + ResponseID: "an_issue|DVO_AN_ISSUE", + Component: "ccx_rules_ocp.external.dvo.an_issue_pod.recommendation", + Key: "DVO_AN_ISSUE", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{NamespaceAWorkload, NamespaceBWorkload}, + } + // Recommendation2OneNamespace with 1 namespace 2 rules + Recommendation2OneNamespace = types.WorkloadRecommendation{ + ResponseID: "unset_requirements|DVO_UNSET_REQUIREMENTS", + Component: "ccx_rules_ocp.external.dvo.unset_requirements.recommendation", + Key: "DVO_UNSET_REQUIREMENTS", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{NamespaceAWorkload, NamespaceAWorkload2}, + } + // Recommendation3OneNamespace with 1 namespace 3 rules + Recommendation3OneNamespace = types.WorkloadRecommendation{ + ResponseID: "bad_requirements|BAD_REQUIREMENTS", + Component: 
"ccx_rules_ocp.external.dvo.bad_requirements.recommendation", + Key: "BAD_REQUIREMENTS", + Links: types.DVOLinks{ + Jira: []string{"https://issues.redhat.com/browse/AN_ISSUE"}, + ProductDocumentation: []string{}, + }, + Details: map[string]interface{}{ + "check_name": "", + "check_url": "", + }, + Tags: []string{}, + Workloads: []types.DVOWorkload{NamespaceAWorkload, NamespaceAWorkload2}, + } +) diff --git a/tests/helpers/http.go b/tests/helpers/http.go index 498923c8c..b6444ff62 100644 --- a/tests/helpers/http.go +++ b/tests/helpers/http.go @@ -96,3 +96,25 @@ func AssertAPIRequest( helpers.AssertAPIRequest(t, testServer, serverConfig.APIPrefix, request, expectedResponse) } + +// AssertAPIRequestDVO is used for requests made against DVO storage +func AssertAPIRequestDVO( + t testing.TB, + mockStorage storage.DVORecommendationsStorage, + serverConfig *server.Configuration, + request *helpers.APIRequest, + expectedResponse *helpers.APIResponse, +) { + if mockStorage == nil { + var closer func() + mockStorage, closer = MustGetPostgresStorageDVO(t, true) + defer closer() + } + if serverConfig == nil { + serverConfig = &DefaultServerConfig + } + + testServer := server.New(*serverConfig, nil, mockStorage) + + helpers.AssertAPIRequest(t, testServer, serverConfig.APIPrefix, request, expectedResponse) +} diff --git a/tests/helpers/mock_storage.go b/tests/helpers/mock_storage.go index cb31890a2..888569dff 100644 --- a/tests/helpers/mock_storage.go +++ b/tests/helpers/mock_storage.go @@ -17,7 +17,6 @@ package helpers import ( "database/sql" "fmt" - "os" "strings" "testing" @@ -116,8 +115,6 @@ func MustCloseMockDBWithExpects( // MustGetPostgresStorage creates test postgres storage with credentials from config-devel func MustGetPostgresStorage(tb testing.TB, init bool) (storage.OCPRecommendationsStorage, func()) { - dbAdminPassword := os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS") - err := conf.LoadConfiguration("../config-devel") helpers.FailOnError(tb, err) @@ -125,7 +122,6 @@ func MustGetPostgresStorage(tb testing.TB, init bool) (storage.OCPRecommendation storageConf := &conf.Config.OCPRecommendationsStorage storageConf.Driver = postgres storageConf.PGDBName += "_test_db_" + strings.ReplaceAll(uuid.New().String(), "-", "_") - storageConf.PGPassword = dbAdminPassword connString := fmt.Sprintf( "host=%s port=%d user=%s password=%s sslmode=disable", @@ -161,8 +157,6 @@ func MustGetPostgresStorage(tb testing.TB, init bool) (storage.OCPRecommendation // MustGetPostgresStorageDVO creates test postgres storage with credentials from config-devel for DVO storage func MustGetPostgresStorageDVO(tb testing.TB, init bool) (storage.DVORecommendationsStorage, func()) { - dbAdminPassword := os.Getenv("INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS") - err := conf.LoadConfiguration("../config-devel") helpers.FailOnError(tb, err) @@ -170,7 +164,6 @@ func MustGetPostgresStorageDVO(tb testing.TB, init bool) (storage.DVORecommendat storageConf := &conf.Config.DVORecommendationsStorage storageConf.Driver = postgres storageConf.PGDBName += "_test_db_" + strings.ReplaceAll(uuid.New().String(), "-", "_") - storageConf.PGPassword = dbAdminPassword connString := fmt.Sprintf( "host=%s port=%d user=%s password=%s sslmode=disable", diff --git a/types/types.go b/types/types.go index b40327798..1770a07b1 100644 --- a/types/types.go +++ b/types/types.go @@ -17,6 +17,9 @@ package types import ( + "database/sql/driver" + "encoding/json" + "errors" "time" types "github.com/RedHatInsights/insights-results-types" @@ 
-54,6 +57,8 @@ const (
 	UserVoteNone = types.UserVoteNone
 	// UserVoteLike shows user's like
 	UserVoteLike = types.UserVoteLike
+	// WorkloadRecommendationSuffix is the suffix stripped from the rule ID (Component attribute) of a WorkloadRecommendation
+	WorkloadRecommendationSuffix = ".recommendation"
 )
 
 type (
@@ -202,6 +207,7 @@ type DVOReport struct {
 	Objects         uint            `json:"objects"`
 	ReportedAt      types.Timestamp `json:"reported_at"`
 	LastCheckedAt   types.Timestamp `json:"last_checked_at"`
+	RuleHitsCount   RuleHitsCount   `json:"rule_hits_count"`
 }
 
 // ClusterReports is a data structure containing list of clusters, list of
@@ -221,3 +227,21 @@ type RuleRating = types.RuleRating
 type Metadata struct {
 	GatheredAt time.Time `json:"gathering_time"`
 }
+
+// RuleHitsCount represents the number of hits per rule
+type RuleHitsCount map[string]int
+
+// Value converts a RuleHitsCount into a JSON-encoded []byte
+func (in RuleHitsCount) Value() (driver.Value, error) {
+	return json.Marshal(in)
+}
+
+// Scan parses a []byte value into a RuleHitsCount
+func (in *RuleHitsCount) Scan(value interface{}) error {
+	b, ok := value.([]byte)
+	if !ok {
+		return errors.New("not byte array")
+	}
+
+	return json.Unmarshal(b, &in)
+}
diff --git a/unit-tests.sh b/unit-tests.sh
index 67e326a51..6b6fc6f24 100755
--- a/unit-tests.sh
+++ b/unit-tests.sh
@@ -24,7 +24,7 @@ function run_unit_tests() {
         run_cmd="$TEST_TO_RUN"
     fi
     # shellcheck disable=SC2046
-    if ! go test -timeout 5m -coverprofile coverage.out $run_cmd
+    if ! go test -timeout 10m -coverprofile coverage.out $run_cmd
     then
         echo "unit tests failed"
         exit 1
@@ -32,7 +32,9 @@ function run_unit_tests() {
 }
 
 function check_composer() {
-    if command -v docker-compose > /dev/null; then
+    if command -v docker > /dev/null && docker compose version > /dev/null 2>&1; then
+        COMPOSER="docker compose"
+    elif command -v docker-compose > /dev/null; then
         COMPOSER=docker-compose
     elif command -v podman-compose > /dev/null; then
         COMPOSER=podman-compose
@@ -44,22 +46,26 @@ function check_composer() {
 
 function wait_for_postgres() {
-    until psql "dbname=aggregator user=postgres password=postgres host=localhost sslmode=disable" -c '\q' ; do
+    echo -n "Waiting for postgres to be available"
+    until psql "dbname=aggregator user=postgres password=postgres host=localhost sslmode=disable" -c '\q' 2> /dev/null; do
+        echo -n "."
         sleep 1
     done
+
+    echo
+    echo "Postgres is available. Continuing"
 }
 
 if [ -z "$CI" ]; then
     echo "Running postgres container locally"
     check_composer
+    echo "Using \"$COMPOSER\" as composer"
     $COMPOSER up -d > /dev/null
     wait_for_postgres
 fi
 
 path_to_config=$(pwd)/config-devel.toml
 export INSIGHTS_RESULTS_AGGREGATOR_CONFIG_FILE="$path_to_config"
-export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB="aggregator"
-export INSIGHTS_RESULTS_AGGREGATOR__TESTS_DB_ADMIN_PASS="postgres"
 
 run_unit_tests
 
 if [ -z "$CI" ]; then
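For reviewers, here is a minimal, self-contained sketch (not part of the patch) of how the RuleHitsCount Valuer/Scanner added in types/types.go is expected to round-trip through JSON; that round trip is what lets the value be bound to the new rule_hits_count column used by getReportInsertQuery. The main wrapper, the sample rule selector key, and the printed output are illustrative only.

```go
package main

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"
)

// RuleHitsCount mirrors the type added in types/types.go:
// a map from rule selector ("component|error_key") to its hit count.
type RuleHitsCount map[string]int

// Value serializes the map to JSON so database/sql can bind it to a single column.
func (in RuleHitsCount) Value() (driver.Value, error) {
	return json.Marshal(in)
}

// Scan restores the map from the JSON bytes read back from the column.
func (in *RuleHitsCount) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return errors.New("not byte array")
	}
	return json.Unmarshal(b, in)
}

func main() {
	hits := RuleHitsCount{
		"ccx_rules_ocp.external.dvo.an_issue_pod|DVO_AN_ISSUE": 1,
	}

	// Value() produces the []byte that would be passed as the tenth argument
	// ($10) of the extended dvo.dvo_report insert query.
	v, err := hits.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("stored as: %s\n", v)

	// Scan() restores the column into DVOReport.RuleHitsCount when reading it back.
	var restored RuleHitsCount
	if err := restored.Scan(v.([]byte)); err != nil {
		panic(err)
	}
	fmt.Printf("restored: %v\n", restored)
}
```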