diff --git a/.github/workflows/ci-cd-yt01.yml b/.github/workflows/ci-cd-yt01.yml index fe0b89722..d7830ff9c 100644 --- a/.github/workflows/ci-cd-yt01.yml +++ b/.github/workflows/ci-cd-yt01.yml @@ -134,9 +134,6 @@ jobs: secrets: TOKEN_GENERATOR_USERNAME: ${{ secrets.TOKEN_GENERATOR_USERNAME }} TOKEN_GENERATOR_PASSWORD: ${{ secrets.TOKEN_GENERATOR_PASSWORD }} - K6_CLOUD_TOKEN: ${{ secrets.K6_CLOUD_TOKEN }} - K6_CLOUD_PROJECT_ID: ${{ secrets.K6_CLOUD_PROJECT_ID }} - strategy: max-parallel: 1 matrix: @@ -150,8 +147,6 @@ jobs: apiVersion: v1 vus: 1 duration: 30s - tokens: both - numberOfTokens: 100 testSuitePath: ${{ matrix.files }} permissions: checks: write diff --git a/.github/workflows/dispatch-k6-performance.yml b/.github/workflows/dispatch-k6-performance.yml index 87dbd26a8..020d1299a 100644 --- a/.github/workflows/dispatch-k6-performance.yml +++ b/.github/workflows/dispatch-k6-performance.yml @@ -16,16 +16,6 @@ on: - test - staging - yt01 - tokens: - description: 'Tokens to generate; for create dialog, search, none, or both' - required: true - default: 'both' - type: choice - options: - - both - - enterprise - - personal - - none tag: description: 'tag the performance test' required: true @@ -52,6 +42,7 @@ on: - 'tests/k6/tests/serviceowner/performance/serviceowner-search.js' - 'tests/k6/tests/enduser/performance/enduser-search.js' - 'tests/k6/tests/graphql/performance/graphql-search.js' + - 'tests/k6/tests/serviceowner/performance/create-transmissions.js' run-name: ${{ inputs.tag }} vus ${{ inputs.vus }} duration ${{ inputs.duration }} jobs: @@ -61,13 +52,10 @@ jobs: secrets: TOKEN_GENERATOR_USERNAME: ${{ secrets.TOKEN_GENERATOR_USERNAME }} TOKEN_GENERATOR_PASSWORD: ${{ secrets.TOKEN_GENERATOR_PASSWORD }} - K6_CLOUD_TOKEN: ${{ secrets.K6_CLOUD_TOKEN }} - K6_CLOUD_PROJECT_ID: ${{ secrets.K6_CLOUD_PROJECT_ID }} with: environment: ${{ inputs.environment }} apiVersion: ${{ inputs.apiVersion }} testSuitePath: ${{ inputs.testSuitePath }} vus: ${{ fromJson(inputs.vus) 
}} duration: ${{ inputs.duration }} - tokens: ${{ inputs.tokens }} diff --git a/.github/workflows/workflow-run-k6-performance.yml b/.github/workflows/workflow-run-k6-performance.yml index 88a839e6c..23a320adc 100644 --- a/.github/workflows/workflow-run-k6-performance.yml +++ b/.github/workflows/workflow-run-k6-performance.yml @@ -18,27 +18,11 @@ on: duration: required: true type: string - tokens: - required: true - type: string - numberOfTokens: - required: false - type: number - default: 0 - ttl: - required: false - type: number - default: 3600 secrets: TOKEN_GENERATOR_USERNAME: required: true TOKEN_GENERATOR_PASSWORD: required: true - K6_CLOUD_TOKEN: - required: true - K6_CLOUD_PROJECT_ID: - required: true - jobs: k6-test: runs-on: ubuntu-latest @@ -53,7 +37,6 @@ jobs: uses: grafana/setup-k6-action@v1 - name: Run K6 tests (${{ inputs.testSuitePath }}) run: | - ./tests/k6/tests/scripts/generate_tokens.sh ./tests/k6/tests/performancetest_data ${{ inputs.tokens }} ${{ inputs.numberOfTokens }} ${{ inputs.ttl }} echo "Running k6 test suite ${{ inputs.testSuitePath }} with ${{ inputs.vus }} VUs for ${{ inputs.duration }}" k6 run ${{ inputs.testSuitePath }} --quiet --log-output=stdout --include-system-env-vars \ --vus=${{ inputs.vus }} --duration=${{ inputs.duration }} --out csv=./results.csv @@ -63,5 +46,3 @@ jobs: API_VERSION: ${{ inputs.apiVersion }} TOKEN_GENERATOR_USERNAME: ${{ secrets.TOKEN_GENERATOR_USERNAME }} TOKEN_GENERATOR_PASSWORD: ${{ secrets.TOKEN_GENERATOR_PASSWORD }} - K6_CLOUD_TOKEN: ${{ secrets.K6_CLOUD_TOKEN }} - K6_CLOUD_PROJECT_ID: ${{ secrets.K6_CLOUD_PROJECT_ID }} diff --git a/README.md b/README.md index 1a0318dca..de8d94328 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ dotnet ef migrations add TestMigration Besides ordinary unit and integration tests, there are test suites for both functional and non-functional end-to-end tests implemented with [K6](https://k6.io/). -See `tests/k6/README.md` for more information. 
+See [tests/k6/README.md](tests/k6/README.md) for more information. ## Health Checks diff --git a/tests/k6/README.md b/tests/k6/README.md index b47da07df..9484e43cb 100644 --- a/tests/k6/README.md +++ b/tests/k6/README.md @@ -23,6 +23,14 @@ Run `Get-Help .\run.ps1` or `./run.sh --help` for usage information. The scripts will use locally installed `k6` if available. Failing that, it will attempt to use Docker with [grafana/k6](https://hub.docker.com/r/grafana/k6) +## Performance tests + +For information about the performance tests, see the following README files: + +- [ServiceOwner](./tests/serviceowner/performance/README.md) +- [GraphQL](./tests/graphql/performance/README.md) +- [EndUser](./tests/enduser/performance/README.md) + ## Test suites Various test suites are defined withing the `suites` directory. A suite consists of diff --git a/tests/k6/common/token.js b/tests/k6/common/token.js index c1f69ad34..2700f284f 100644 --- a/tests/k6/common/token.js +++ b/tests/k6/common/token.js @@ -31,7 +31,7 @@ function getCacheKey(tokenType, tokenOptions) { return `${tokenType}|${tokenOptions.scopes}|${tokenOptions.orgName}|${tokenOptions.orgNo}|${tokenOptions.ssn}`; } -function fetchToken(url, tokenOptions, type) { +export function fetchToken(url, tokenOptions, type) { const currentTime = Math.floor(Date.now() / 1000); const cacheKey = getCacheKey(type, tokenOptions); diff --git a/tests/k6/tests/enduser/performance/README.md b/tests/k6/tests/enduser/performance/README.md index c1af12afd..bcb02fe7e 100644 --- a/tests/k6/tests/enduser/performance/README.md +++ b/tests/k6/tests/enduser/performance/README.md @@ -10,7 +10,7 @@ The test file associated with this performance test is - `enduser-search.js` ## Test description -The test has a list of enduser (ssn) with pre-generated tokens, and the following endpoints are visited in +The test has a list of enduser (ssn), and the following endpoints are visited in sequence for each enduser: - 
api/v1/enduser/dialogs?Party=urn:altinn:person:identifier-no:`` - api/v1/enduser/dialogs/`` @@ -28,19 +28,14 @@ sequence for each enduser: ```shell cd tests/k6/tests/enduser/performance ``` -2. Generate tokens using the script below. Make sure to replace ``, `` and `` with your desired values: -```shell -TOKEN_GENERATOR_USERNAME= \ -TOKEN_GENERATOR_PASSWORD= API_ENVIRONMENT= \ -../../scripts/generate_tokens.sh ../../performancetest_data personal -``` -3. Run the test using the following command. Replace ``, ``, and `` with the desired values: +2. Run the test using the following command. Replace ``, ``, and `` with the desired values: ```shell +TOKEN_GENERATOR_USERNAME= TOKEN_GENERATOR_PASSWORD= \ k6 run enduser-search.js -e API_VERSION=v1 \ -e API_ENVIRONMENT= \ --vus= --duration= ``` -4. Refer to the k6 documentation for more information on usage. +3. Refer to the k6 documentation for more information on usage. ### From GitHub Actions To run the performance test using GitHub Actions, follow these steps: @@ -56,26 +51,16 @@ To run the performance test locally using GitHub Actions and act, perform the fo ```file TOKEN_GENERATOR_USERNAME: TOKEN_GENERATOR_PASSWORD: -K6_CLOUD_PROJECT_ID=** -K6_CLOUD_TOKEN=** -K6_PROMETHEUS_RW_USERNAME=** -K6_PROMETHEUS_RW_PASSWORD=** -K6_PROMETHEUS_RW_SERVER_URL=** ``` - Replace `` and ``, same as for generating tokens above. Fill in the K6_* values if available, - used for reporting to Grafana cloud + Replace `` and ``, same as for generating tokens above. ##### IMPORTANT: Ensure this file is added to .gitignore to prevent accidental commits of sensitive information. Never commit actual credentials to version control. 4. Run `act` using the command below. 
Replace `` and `` with the desired values: ```shell act workflow_dispatch -j k6-performance -s GITHUB_TOKEN=`gh auth token` \ --container-architecture linux/amd64 --artifact-server-path $HOME/.act \ --input vus= --input duration= \ ---input testSuitePath=tests/k6/tests/enduser/performance/enduser-search.js \ ---input tokens=personal +--input testSuitePath=tests/k6/tests/enduser/performance/enduser-search.js ``` ## Test Results -Test results can be found in GitHub action run log and in App Insights. We are prepared for exporting results to grafana, but so far results are exported to a private grafana instance only, as can be seen from the `.secrets`listed earlier - -## TODO -Fix reporting +Test results can be found in GitHub action run log, grafana and in App Insights. diff --git a/tests/k6/tests/enduser/performance/enduser-search.js b/tests/k6/tests/enduser/performance/enduser-search.js index 41f1b0d7c..ed20030e1 100644 --- a/tests/k6/tests/enduser/performance/enduser-search.js +++ b/tests/k6/tests/enduser/performance/enduser-search.js @@ -1,6 +1,6 @@ import { enduserSearch } from '../../performancetest_common/simpleSearch.js' import { getDefaultThresholds } from '../../performancetest_common/getDefaultThresholds.js'; -import { endUsersWithTokens } from '../../performancetest_common/readTestdata.js'; +import { endUsers } from '../../performancetest_common/readTestdata.js'; const isSingleUserMode = (__ENV.isSingleUserMode ?? 'false') === 'true'; const traceCalls = (__ENV.traceCalls ?? 
'false') === 'true'; @@ -20,16 +20,16 @@ export let options = { }; export default function() { - if (!endUsersWithTokens || endUsersWithTokens.length === 0) { + if (!endUsers || endUsers.length === 0) { throw new Error('No end users loaded for testing'); } if (isSingleUserMode) { - enduserSearch(endUsersWithTokens[0], traceCalls); + enduserSearch(endUsers[0], traceCalls); } else { - for (let i = 0; i < endUsersWithTokens.length; i++) { - enduserSearch(endUsersWithTokens[i], traceCalls); + for (let i = 0; i < endUsers.length; i++) { + enduserSearch(endUsers[i], traceCalls); } } } diff --git a/tests/k6/tests/graphql/performance/README.md b/tests/k6/tests/graphql/performance/README.md index 7410616fe..3f1292b3d 100644 --- a/tests/k6/tests/graphql/performance/README.md +++ b/tests/k6/tests/graphql/performance/README.md @@ -15,19 +15,14 @@ The test file associated with this performance test is ```shell cd tests/k6/tests/graphql/performance ``` -2. Generate tokens using the script below. Make sure to replace ``, `` and `()` with your desired values: -```shell -TOKEN_GENERATOR_USERNAME= \ -TOKEN_GENERATOR_PASSWORD= API_ENVIRONMENT=<(test|staging|yt01)> \ -../../scripts/generate_tokens.sh ../../performancetest_data personal -``` -3. Run the test using the following command. Replace `<(test|staging|yt01)>`, ``, and `` with the desired values: +2. Run the test using the following command. Replace `<(test|staging|yt01)>`, ``, and `` with the desired values: ```shell +TOKEN_GENERATOR_USERNAME= TOKEN_GENERATOR_PASSWORD= \ k6 run graphql-search.js -e API_VERSION=v1 \ -e API_ENVIRONMENT=<(test|staging|yt01)> \ --vus= --duration= ``` -4. Refer to the k6 documentation for more information on usage. +3. Refer to the k6 documentation for more information on usage. ### From GitHub Actions To run the performance test using GitHub Actions, follow these steps: 1. Go to the [GitHub Actions](https://github.com/altinn/dialogporten/actions/workflows/dispatch-k6-performance.yml) page. 
@@ -42,26 +37,16 @@ To run the performance test locally using GitHub Actions and act, perform the fo ```file TOKEN_GENERATOR_USERNAME: TOKEN_GENERATOR_PASSWORD: -K6_CLOUD_PROJECT_ID=** -K6_CLOUD_TOKEN=** -K6_PROMETHEUS_RW_USERNAME=** -K6_PROMETHEUS_RW_PASSWORD=** -K6_PROMETHEUS_RW_SERVER_URL=** ``` - Replace `` and ``, same as for generating tokens above. Fill in the K6_* values if available, - used for reporting to Grafana cloud + Replace `` and ``, same as for generating tokens above. ##### IMPORTANT: Ensure this file is added to .gitignore to prevent accidental commits of sensitive information. Never commit actual credentials to version control. 4. Run `act` using the command below. Replace `` and `` with the desired values: ```shell act workflow_dispatch -j k6-performance -s GITHUB_TOKEN=`gh auth token` \ --container-architecture linux/amd64 --artifact-server-path $HOME/.act \ --input vus= --input duration= \ ---input testSuitePath=tests/k6/tests/graphql/performance/graphql-search.js \ ---input tokens=personal +--input testSuitePath=tests/k6/tests/graphql/performance/graphql-search.js ``` ## Test Results -Test results can be found in GitHub action run log and in App Insights. We are prepared for exporting results to grafana, but so far results are exported to a private grafana instance only, as can be seen from the `.secrets`listed earlier - -## TODO -Fix reporting +Test results can be found in GitHub action run log, grafana and in App Insights. 
\ No newline at end of file diff --git a/tests/k6/tests/graphql/performance/graphql-search.js b/tests/k6/tests/graphql/performance/graphql-search.js index d634dbfc9..91a615788 100644 --- a/tests/k6/tests/graphql/performance/graphql-search.js +++ b/tests/k6/tests/graphql/performance/graphql-search.js @@ -4,7 +4,7 @@ */ import { getDefaultThresholds } from '../../performancetest_common/getDefaultThresholds.js'; -import { endUsersWithTokens as endUsers } from '../../performancetest_common/readTestdata.js'; +import { endUsers } from '../../performancetest_common/readTestdata.js'; import { graphqlSearch } from "../../performancetest_common/simpleSearch.js"; const isSingleUserMode = (__ENV.isSingleUserMode ?? 'false') === 'true'; @@ -17,7 +17,7 @@ const traceCalls = (__ENV.traceCalls ?? 'false') === 'true'; * @property {string[]} summaryTrendStats - The summary trend statistics to include in the test results. * @property {object} thresholds - The thresholds for the test metrics. */ -export let options = { +export const options = { summaryTrendStats: ['avg', 'min', 'med', 'max', 'p(95)', 'p(99)', 'p(99.5)', 'p(99.9)', 'count'], thresholds: getDefaultThresholds(['http_req_duration', 'http_reqs'],['graphql search']) }; diff --git a/tests/k6/tests/performancetest_common/createDialog.js b/tests/k6/tests/performancetest_common/createDialog.js index 0768e4030..08e9d32e1 100644 --- a/tests/k6/tests/performancetest_common/createDialog.js +++ b/tests/k6/tests/performancetest_common/createDialog.js @@ -6,6 +6,8 @@ import { describe } from "../../common/describe.js"; import { postSO, purgeSO } from "../../common/request.js"; import { expect } from "../../common/testimports.js"; import dialogToInsert from "../performancetest_data/01-create-dialog.js"; +import { default as transmissionToInsert } from "../performancetest_data/create-transmission.js"; +import { getEnterpriseToken } from "./getTokens.js"; /** * Creates a dialog. 
@@ -13,16 +15,18 @@ import dialogToInsert from "../performancetest_data/01-create-dialog.js"; * @param {Object} serviceOwner - The service owner object. * @param {Object} endUser - The end user object. */ + export function createDialog(serviceOwner, endUser, traceCalls) { var traceparent = uuidv4(); var paramsWithToken = { headers: { - Authorization: "Bearer " + serviceOwner.token, + Authorization: "Bearer " + getEnterpriseToken(serviceOwner), traceparent: traceparent }, tags: { name: 'create dialog' } }; + if (traceCalls) { paramsWithToken.tags.traceparent = traceparent; paramsWithToken.tags.enduser = endUser.ssn; @@ -32,7 +36,6 @@ export function createDialog(serviceOwner, endUser, traceCalls) { let r = postSO('dialogs', dialogToInsert(endUser.ssn, endUser.resource), paramsWithToken); expect(r.status, 'response status').to.equal(201); }); - } /** @@ -45,7 +48,7 @@ export function createAndRemoveDialog(serviceOwner, endUser, traceCalls) { var traceparent = uuidv4(); var paramsWithToken = { headers: { - Authorization: "Bearer " + serviceOwner.token, + Authorization: "Bearer " + getEnterpriseToken(serviceOwner), traceparent: traceparent }, tags: { name: 'create dialog' } @@ -72,3 +75,64 @@ export function createAndRemoveDialog(serviceOwner, endUser, traceCalls) { } }); } + +/** + * Creates a dialog and add a number of transmissions + * + * @param {Object} serviceOwner - The service owner object. + * @param {Object} endUser - The end user object. 
+ */ +export function createTransmissions(serviceOwner, endUser, traceCalls, numberOfTransmissions, maxTransmissionsInThread, testid) { + let traceparent = uuidv4(); + + let paramsWithToken = { + headers: { + Authorization: "Bearer " + getEnterpriseToken(serviceOwner), + traceparent: traceparent + }, + tags: { name: 'create dialog', testid: testid } + }; + if (traceCalls) { + paramsWithToken.tags.traceparent = traceparent; + paramsWithToken.tags.enduser = endUser.ssn; + } + + let dialogId = 0; + describe('create dialog', () => { + let r = postSO('dialogs', dialogToInsert(endUser.ssn, endUser.resource), paramsWithToken); + dialogId = r.json(); + expect(r.status, 'response status').to.equal(201); + }); + + let relatedTransmissionId = 0; + for (let i = 0; i < numberOfTransmissions; i++) { + + relatedTransmissionId = createTransmission(dialogId, relatedTransmissionId, serviceOwner, traceCalls, testid); + // Max transmissions in thread reached, start new thread + if ((i + 1) % maxTransmissionsInThread === 0) { + relatedTransmissionId = 0; + } + } + +} + +export function createTransmission(dialogId, relatedTransmissionId, serviceOwner, traceCalls, testid) { + let traceparent = uuidv4(); + + let paramsWithToken = { + headers: { + Authorization: "Bearer " + getEnterpriseToken(serviceOwner), + traceparent: traceparent + }, + tags: { name: 'create transmission', testid: testid } + }; + + let newRelatedTransmissionId; + describe('create transmission', () => { + let r = postSO('dialogs/' + dialogId + '/transmissions', transmissionToInsert(relatedTransmissionId), paramsWithToken); + expect(r.status, 'response status').to.equal(201); + newRelatedTransmissionId = r.json(); + }); + return newRelatedTransmissionId; +} + diff --git a/tests/k6/tests/performancetest_common/getTokens.js b/tests/k6/tests/performancetest_common/getTokens.js new file mode 100644 index 000000000..20a49cd46 --- /dev/null +++ b/tests/k6/tests/performancetest_common/getTokens.js @@ -0,0 +1,23 @@ +import { fetchToken
} from "../../common/token.js"; + +const tokenGeneratorEnv = __ENV.API_ENVIRONMENT || 'yt01'; +const tokenTtl = __ENV.TTL || 3600; + +export function getEnterpriseToken(serviceOwner) { + const tokenOptions = { + scopes: serviceOwner.scopes, + orgName: serviceOwner.org, + orgNo: serviceOwner.orgno + } + const url = `https://altinn-testtools-token-generator.azurewebsites.net/api/GetEnterpriseToken?env=${tokenGeneratorEnv}&scopes=${encodeURIComponent(tokenOptions.scopes)}&org=${tokenOptions.orgName}&orgNo=${tokenOptions.orgNo}&ttl=${tokenTtl}`; + return fetchToken(url, tokenOptions, `service owner (orgno:${tokenOptions.orgNo} orgName:${tokenOptions.orgName} tokenGeneratorEnv:${tokenGeneratorEnv})`); +} + +export function getPersonalToken(endUser) { + const tokenOptions = { + scopes: endUser.scopes, + ssn: endUser.ssn + } + const url = `https://altinn-testtools-token-generator.azurewebsites.net/api/GetPersonalToken?env=${tokenGeneratorEnv}&scopes=${encodeURIComponent(tokenOptions.scopes)}&pid=${tokenOptions.ssn}&ttl=${tokenTtl}`; + return fetchToken(url, tokenOptions, `end user (ssn:${tokenOptions.ssn}, tokenGeneratorEnv:${tokenGeneratorEnv})`); + } diff --git a/tests/k6/tests/performancetest_common/readTestdata.js b/tests/k6/tests/performancetest_common/readTestdata.js index 538c292a3..39dbbea70 100644 --- a/tests/k6/tests/performancetest_common/readTestdata.js +++ b/tests/k6/tests/performancetest_common/readTestdata.js @@ -23,12 +23,11 @@ function readCsv(filename) { } } -const filenameServiceowners = '../performancetest_data/.serviceowners-with-tokens.csv'; if (!__ENV.API_ENVIRONMENT) { throw new Error('API_ENVIRONMENT must be set'); } const filenameEndusers = `../performancetest_data/endusers-${__ENV.API_ENVIRONMENT}.csv`; -const filenameEndusersWithTokens = '../performancetest_data/.endusers-with-tokens.csv'; +const filenameServiceowners = `../performancetest_data/serviceowners-${__ENV.API_ENVIRONMENT}.csv`; /** * SharedArray variable that stores the service 
owners data. @@ -53,14 +52,4 @@ export const endUsers = new SharedArray('endUsers', function () { return readCsv(filenameEndusers); }); -/** - * SharedArray variable that stores the end users with tokens data. - * The data is parsed from the CSV file specified by the filenameEndusersWithTokens variable. - * - * @name endUsersWithTokens - * @type {SharedArray} - */ -export const endUsersWithTokens = new SharedArray('endUsersWithTokens', function () { - return readCsv(filenameEndusersWithTokens); -}); diff --git a/tests/k6/tests/performancetest_common/simpleSearch.js b/tests/k6/tests/performancetest_common/simpleSearch.js index 8e0f7ef0c..b39f6470f 100644 --- a/tests/k6/tests/performancetest_common/simpleSearch.js +++ b/tests/k6/tests/performancetest_common/simpleSearch.js @@ -7,6 +7,7 @@ import { expect, expectStatusFor } from "../../common/testimports.js"; import { describe } from '../../common/describe.js'; import { getEU, postGQ, getSO } from '../../common/request.js'; import { getGraphqlParty } from '../performancetest_data/graphql-search.js'; +import { getEnterpriseToken, getPersonalToken } from './getTokens.js'; /** * Retrieves the content for a dialog. 
@@ -20,7 +21,6 @@ function retrieveDialogContent(response, paramsWithToken, getFunction = getEU) { if (!items?.length) return; const dialogId = items[0].id; if (!dialogId) return; - getContent(dialogId, paramsWithToken, 'get dialog', '', getFunction); getContentChain(dialogId, paramsWithToken, 'get dialog activities', 'get dialog activity', '/activities/', getFunction); getContentChain(dialogId, paramsWithToken, 'get seenlogs', 'get seenlog', '/seenlog/', getFunction); @@ -45,7 +45,7 @@ export function enduserSearch(enduser, traceCalls) { var traceparent = uuidv4(); let paramsWithToken = { headers: { - Authorization: "Bearer " + enduser.token, + Authorization: "Bearer " + getPersonalToken(enduser), traceparent: traceparent }, tags: { name: 'enduser search' } @@ -132,7 +132,7 @@ export function graphqlSearch(enduser, traceCalls) { let traceparent = uuidv4(); let paramsWithToken = { headers: { - Authorization: "Bearer " + enduser.token, + Authorization: "Bearer " + getPersonalToken(enduser), traceparent: traceparent, 'User-Agent': 'dialogporten-k6-graphql-search' }, @@ -160,7 +160,7 @@ export function serviceownerSearch(serviceowner, enduser, tag_name, traceCalls, let traceparent = uuidv4(); let paramsWithToken = { headers: { - Authorization: "Bearer " + serviceowner.token, + Authorization: "Bearer " + getEnterpriseToken(serviceowner), traceparent: traceparent }, tags: { name: tag_name } diff --git a/tests/k6/tests/performancetest_data/01-create-dialog.js b/tests/k6/tests/performancetest_data/01-create-dialog.js index 551d324cc..cd467a456 100644 --- a/tests/k6/tests/performancetest_data/01-create-dialog.js +++ b/tests/k6/tests/performancetest_data/01-create-dialog.js @@ -54,5 +54,6 @@ export default function (endUser, resource) { let payload = createDialogPayload(); payload.serviceResource = "urn:altinn:resource:" +resource; payload.party = "urn:altinn:person:identifier-no:" + endUser; + return cleanUp(payload); } diff --git 
a/tests/k6/tests/performancetest_data/create-transmission.js b/tests/k6/tests/performancetest_data/create-transmission.js new file mode 100644 index 000000000..cf7413475 --- /dev/null +++ b/tests/k6/tests/performancetest_data/create-transmission.js @@ -0,0 +1,65 @@ + +import { uuidv7 } from "../../common/uuid.js"; + +export default function (relatedTransmissionId) { + let transmission = { + "id": uuidv7(), + "createdAt": new Date().toISOString(), + "authorizationAttribute": "element1", + "extendedType": "string", + "type": "Information", + "sender": { + "actorType": "serviceOwner" + }, + "content": { + "title": { + "value": [ + { + "value": "Forsendelsestittel", + "languageCode": "nb" + }, + { + "languageCode": "en", + "value": "Transmission title" + } + ], + }, + "summary": { + "value": [ + { + "languageCode": "nb", + "value": "Forsendelse oppsummering" + }, + { + "languageCode": "en", + "value": "Transmission summary" + } + ], + } + }, + "attachments": [ + { + "displayName": [ + { + "languageCode": "nb", + "value": "Forsendelse visningsnavn" + }, + { + "languageCode": "en", + "value": "Transmission attachment display name" + } + ], + "urls": [ + { + "url": "https://digdir.apps.tt02.altinn.no/some-other-url", + "consumerType": "Gui" + } + ] + } + ] + } + if (relatedTransmissionId != 0) { + transmission.relatedTransmissionId = relatedTransmissionId; + } + return transmission; +} \ No newline at end of file diff --git a/tests/k6/tests/scripts/generate_tokens.sh b/tests/k6/tests/scripts/generate_tokens.sh deleted file mode 100755 index dea5ab4db..000000000 --- a/tests/k6/tests/scripts/generate_tokens.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/bash - -# Check if required environment variables are set -if [ -z "$TOKEN_GENERATOR_USERNAME" ] || [ -z "$TOKEN_GENERATOR_PASSWORD" ] || [ -z "$API_ENVIRONMENT" ]; then - echo "Error: TOKEN_GENERATOR_USERNAME, TOKEN_GENERATOR_PASSWORD, and API_ENVIRONMENT must be set" - exit 1 -fi - -# Function to display usage information 
-usage() { - echo "Usage: $0 " - echo " : Path to the test data files" - echo " : Type of tokens to generate (both, enterprise, or personal)" - echo " : limit number of tokens to generate. 0 means generate all" - echo " : Time to live in seconds for the generated tokens" - echo "Example: $0 /path/to/testdata both 10 3600" - exit 1 -} - -# Validate arguments -if [ $# -ne 4 ]; then - usage -fi - -tokengenuser=${TOKEN_GENERATOR_USERNAME} -tokengenpasswd=${TOKEN_GENERATOR_PASSWORD} - -env="" -case $API_ENVIRONMENT in - "test") - env="at21" ;; - "staging") - env="tt02" ;; - "yt01") - env="yt01" ;; - "localdev") - env="at21" ;; - *) - echo "Error: Unknown api environment $API_ENVIRONMENT" - exit 1 ;; -esac - -testdatafilepath=$1 -tokens=$2 -limit=$3 -ttl=$4 - -# Validate tokens argument -if [[ ! "$tokens" =~ ^(both|enterprise|personal)$ ]]; then - echo "Error: Invalid token type. Must be 'both', 'enterprise', or 'personal'." - usage -fi - -serviceowner_datafile="$testdatafilepath/serviceowners-$API_ENVIRONMENT.csv" -serviceowner_tokenfile="$testdatafilepath/.serviceowners-with-tokens.csv" -enduser_datafile="$testdatafilepath/endusers-$API_ENVIRONMENT.csv" -enduser_tokenfile="$testdatafilepath/.endusers-with-tokens.csv" - -if [ "$tokens" = "both" ] || [ "$tokens" = "enterprise" ]; then - if [ ! -f "$serviceowner_datafile" ]; then - echo "Error: Input file not found: $serviceowner_datafile" - exit 1 - fi - echo "org,orgno,scopes,resource,token" > $serviceowner_tokenfile - generated=0 - while IFS=, read -r org orgno scopes resource - do - if [ $limit -gt 0 ] && [ $generated -ge $limit ]; then - break - fi - url="https://altinn-testtools-token-generator.azurewebsites.net/api/GetEnterpriseToken?org=$org&env=$env&orgno=$orgno&ttl=$ttl" - token=$(curl -s -f --get --data-urlencode "scopes=$scopes" $url -u "$tokengenuser:$tokengenpasswd" ) - if [ $? 
-ne 0 ]; then - echo "Error: Failed to generate enterprise token for: $env, $org, $orgno, $scopes " - continue - fi - echo "$org,$orgno,$scopes,$resource,$token" >> $serviceowner_tokenfile - status=$? - if [ $status -ne 0 ]; then - echo "Error: Failed to write enterprise token to file for: $env, $org, $orgno, $scopes" - else - ((generated++)) - fi - done < <(tail -n +2 $serviceowner_datafile) -fi - -if [ "$tokens" = "both" ] || [ "$tokens" = "personal" ]; then - if [ ! -f "$enduser_datafile" ]; then - echo "Error: Input file not found: $enduser_datafile" - exit 1 - fi - echo "ssn,resource,scopes,token" > $enduser_tokenfile - generated=0 - while IFS=, read -r ssn resource scopes - do - if [ $limit -gt 0 ] && [ $generated -ge $limit ]; then - break - fi - url="https://altinn-testtools-token-generator.azurewebsites.net/api/GetPersonalToken?env=$env&scopes=$scopes&pid=$ssn&ttl=$ttl" - token=$(curl -s -f $url -u "$tokengenuser:$tokengenpasswd" ) - if [ $? -ne 0 ]; then - echo "Error: Failed to generate personal token for: $ssn, $scopes " - continue - fi - echo "$ssn,$resource,$scopes,$token" >> $enduser_tokenfile - status=$? 
- if [ $status -ne 0 ]; then - echo "Error: Failed to write personal token to file for: $ssn, $scopes" - else - ((generated++)) - fi - done < <(tail -n +2 $enduser_datafile) -fi diff --git a/tests/k6/tests/scripts/run-test-in-k8s.sh b/tests/k6/tests/scripts/run-test-in-k8s.sh new file mode 100755 index 000000000..0873f5a4d --- /dev/null +++ b/tests/k6/tests/scripts/run-test-in-k8s.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +tokengenuser=${TOKEN_GENERATOR_USERNAME} +tokengenpasswd=${TOKEN_GENERATOR_PASSWORD} + +# Validate required environment variables +if [ -z "$TOKEN_GENERATOR_USERNAME" ] || [ -z "$TOKEN_GENERATOR_PASSWORD" ]; then + echo "Error: TOKEN_GENERATOR_USERNAME and TOKEN_GENERATOR_PASSWORD must be set" + exit 1 +fi + +help() { + echo "Usage: $0 [OPTIONS]" + echo "Options:" + echo " -f, --filename Specify the filename of the k6 script archive" + echo " -c, --configmapname Specify the name of the configmap to create" + echo " -n, --name Specify the name of the test run" + echo " -v, --vus Specify the number of virtual users" + echo " -d, --duration Specify the duration of the test" + echo " -p, --parallelism Specify the level of parallelism" + echo " -h, --help Show this help message" + exit 0 +} + +print_logs() { + POD_LABEL="k6-test=$name" + K8S_CONTEXT="${K8S_CONTEXT:-k6tests-cluster}" + K8S_NAMESPACE="${K8S_NAMESPACE:-default}" + LOG_TIMEOUT="${LOG_TIMEOUT:-60}" + # Verify kubectl access + if ! 
kubectl --context "$K8S_CONTEXT" -n "$K8S_NAMESPACE" get pods &>/dev/null; then + echo "Error: Failed to access Kubernetes cluster" + return 1 + fi + for pod in $(kubectl --context "$K8S_CONTEXT" -n "$K8S_NAMESPACE" get pods -l "$POD_LABEL" -o name); do + if [[ $pod != *"initializer"* ]]; then + echo --------------------------- + echo $pod + echo --------------------------- + kubectl --context "$K8S_CONTEXT" -n "$K8S_NAMESPACE" logs --tail=-1 $pod + fi + done +} + +while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) + help + ;; + -f|--filename) + filename="$2" + shift 2 + ;; + -c|--configmapname) + configmapname="$2" + shift 2 + ;; + -n|--name) + name="$2" + shift 2 + ;; + -v|--vus) + vus="$2" + shift 2 + ;; + -d|--duration) + duration="$2" + shift 2 + ;; + -p|--parallelism) + parallelism="$2" + shift 2 + ;; + *) + echo "Invalid option: $1" + help + exit 1 + ;; + esac +done + +# Validate required arguments +missing_args=() +[ -z "$filename" ] && missing_args+=("filename (-f)") +[ -z "$configmapname" ] && missing_args+=("configmapname (-c)") +[ -z "$name" ] && missing_args+=("name (-n)") +[ -z "$vus" ] && missing_args+=("vus (-v)") +[ -z "$duration" ] && missing_args+=("duration (-d)") +[ -z "$parallelism" ] && missing_args+=("parallelism (-p)") + +if [ ${#missing_args[@]} -ne 0 ]; then + echo "Error: Missing required arguments: ${missing_args[*]}" + help + exit 1 +fi +# Set testid to name + timestamp +testid="${name}_$(date '+%Y%m%dT%H%M%S')" + +# Create the k6 archive +if ! k6 archive $filename -e API_VERSION=v1 -e API_ENVIRONMENT=yt01 -e TOKEN_GENERATOR_USERNAME=$tokengenuser -e TOKEN_GENERATOR_PASSWORD=$tokengenpasswd -e TESTID=$testid; then + echo "Error: Failed to create k6 archive" + exit 1 +fi +# Create the configmap from the archive +if ! 
kubectl create configmap $configmapname --from-file=archive.tar; then + echo "Error: Failed to create configmap" + rm archive.tar + exit 1 +fi + +# Create the config.yml file from a string +cat < config.yml +apiVersion: k6.io/v1alpha1 +kind: TestRun +metadata: + name: $name +spec: + arguments: --out experimental-prometheus-rw --vus=$vus --duration=$duration + parallelism: $parallelism + script: + configMap: + name: $configmapname + file: archive.tar + runner: + env: + - name: K6_PROMETHEUS_RW_SERVER_URL + value: "http://kube-prometheus-stack-prometheus.monitoring:9090/api/v1/write" + - name: K6_PROMETHEUS_RW_TREND_STATS + value: "avg,min,med,max,p(95),p(99),p(99.5),p(99.9),count" + metadata: + labels: + k6-test: $name + +EOF +# Apply the config.yml configuration +kubectl apply -f config.yml + +# Wait for the job to finish +wait_timeout="${duration}100s" +kubectl --context k6tests-cluster wait --for=jsonpath='{.status.stage}'=finished testrun/$name --timeout=$wait_timeout + +# Print the logs of the pods +print_logs + +cleanup() { + local exit_code=$? + echo "Cleaning up resources..." 
+ + if [ -f "config.yml" ]; then + kubectl delete -f config.yml --ignore-not-found || true + rm -f config.yml + fi + + if kubectl get configmap $configmapname &>/dev/null; then + kubectl delete configmap $configmapname --ignore-not-found || true + fi + + rm -f archive.tar + + exit $exit_code +} +trap cleanup EXIT \ No newline at end of file diff --git a/tests/k6/tests/serviceowner/performance/README.md b/tests/k6/tests/serviceowner/performance/README.md index 778a4f504..5e3f220e1 100644 --- a/tests/k6/tests/serviceowner/performance/README.md +++ b/tests/k6/tests/serviceowner/performance/README.md @@ -10,6 +10,7 @@ Before running the performance test, make sure you have met the following prereq The test files associated with this performance test are - `create-dialog.js` - `create-remove-dialog.js` +- `create-transmissions.js` - `serviceowner-search.js` - `purge-dialogs.js` (used for cleanup after test) @@ -21,19 +22,14 @@ To run the performance test, follow the instructions below: ```shell cd tests/k6/tests/serviceowner/performance ``` -2. Generate tokens using the script below. Make sure to replace ``, `` and `<(test|staging|yt01)>` with your actual desired values: -```shell -TOKEN_GENERATOR_USERNAME= \ -TOKEN_GENERATOR_PASSWORD= API_ENVIRONMENT=<(test|staging|yt01)> \ -../../scripts/generate_tokens.sh ../../performancetest_data both -``` -3. Run the test using the following command. Replace ``, `<(test|staging|yt01)>`, ``, and `` with the desired values: +2. Run the test using the following command. Replace ``, `<(test|staging|yt01)>`, ``, and `` with the desired values: ```shell +TOKEN_GENERATOR_USERNAME= TOKEN_GENERATOR_PASSWORD= \ k6 run -e API_VERSION=v1 \ -e API_ENVIRONMENT=<(test|staging|yt01)> \ --vus= --duration= ``` -4. Refer to the k6 documentation for more information on usage. +3. Refer to the k6 documentation for more information on usage. 
#### From GitHub Actions To run the performance test using GitHub Actions, follow these steps: @@ -49,22 +45,15 @@ To run the performance test locally using GitHub Actions and act, perform the fo ```file TOKEN_GENERATOR_USERNAME: TOKEN_GENERATOR_PASSWORD: -K6_CLOUD_PROJECT_ID=** -K6_CLOUD_TOKEN=** -K6_PROMETHEUS_RW_USERNAME=** -K6_PROMETHEUS_RW_PASSWORD=** -K6_PROMETHEUS_RW_SERVER_URL=** ``` - Replace `` and ``, same as for generating tokens above. Fill in the K6_* values if available, - used for reporting to Grafana cloud + Replace `` and ``, same as for generating tokens above. ##### IMPORTANT: Ensure this file is added to .gitignore to prevent accidental commits of sensitive information. Never commit actual credentials to version control. 4. Run `act` using the command below. Replace ``, `` and `` with the desired values: ```shell act workflow_dispatch -j k6-performance -s GITHUB_TOKEN=`gh auth token` \ --container-architecture linux/amd64 --artifact-server-path $HOME/.act \ --input vus= --input duration= \ ---input testSuitePath= \ ---input tokens=<(personal|enterprise|both)> +--input testSuitePath= ``` Example of command: @@ -72,8 +61,7 @@ Example of command: act workflow_dispatch -j k6-performance -s GITHUB_TOKEN=`gh auth token` \ --container-architecture linux/amd64 --artifact-server-path $HOME/.act \ --input vus=10 --input duration=5m \ ---input testSuitePath=tests/k6/tests/serviceowner/performance/create-dialog.js \ ---input tokens=enterprise +--input testSuitePath=tests/k6/tests/serviceowner/performance/create-dialog.js ``` #### Clean up @@ -97,7 +85,4 @@ Replace `<(test|staging|yt01)>` with the appropriate environment where the test This script will remove any dialogs created during the performance test, ensuring a clean state for future tests. ### Test Results -The test results can be found in the GitHub Actions run log and in App Insights. Currently, the results are exported to a private Grafana instance. 
Refer to the `.secrets` file mentioned earlier for more details. - - ### TODO - - Fix reporting +The test results can be found in the GitHub Actions run log, Grafana and in App Insights. diff --git a/tests/k6/tests/serviceowner/performance/create-transmissions.js b/tests/k6/tests/serviceowner/performance/create-transmissions.js new file mode 100644 index 000000000..00380fd97 --- /dev/null +++ b/tests/k6/tests/serviceowner/performance/create-transmissions.js @@ -0,0 +1,38 @@ +/** + * Performance test for creating dialogs with transmissions + * Run: k6 run tests/k6/tests/serviceowner/performance/create-transmissions.js --vus 1 --iterations 1 -e numberOfTransmissions=100 + */ +import { randomItem } from 'https://jslib.k6.io/k6-utils/1.4.0/index.js'; +import { getDefaultThresholds } from '../../performancetest_common/getDefaultThresholds.js'; +import { createTransmissions } from '../../performancetest_common/createDialog.js'; +import { serviceOwners, endUsers } from '../../performancetest_common/readTestdata.js'; + +export const options = { + summaryTrendStats: ['avg', 'min', 'med', 'max', 'p(95)', 'p(99)', 'p(99.5)', 'p(99.9)', 'count'], + thresholds: getDefaultThresholds(['http_req_duration', 'http_reqs'],['create dialog', 'create transmission']) +}; + +const isSingleUserMode = (__ENV.isSingleUserMode ?? 'false') === 'true'; +const traceCalls = (__ENV.traceCalls ?? 'false') === 'true'; +const numberOfTransmissions = (__ENV.numberOfTransmissions ?? '10'); +const maxTransmissionsInThread = (__ENV.maxTransmissionsInThread ?? '100'); +const testid = (__ENV.TESTID ?? 
'createTransmissions'); + +export default function() { + if (!endUsers || endUsers.length === 0) { + throw new Error('No end users loaded for testing'); + } + if (!serviceOwners || serviceOwners.length === 0) { + throw new Error('No service owners loaded for testing'); + } + if (isSingleUserMode) { + createTransmissions(serviceOwners[0], endUsers[0], traceCalls, numberOfTransmissions, maxTransmissionsInThread, testid); + } + else { + let serviceOwner = randomItem(serviceOwners); + for (const endUser of endUsers) { + createTransmissions(serviceOwner, endUser, traceCalls, numberOfTransmissions, maxTransmissionsInThread, testid); + } + } + } +